code
stringlengths
3
1.01M
repo_name
stringlengths
5
116
path
stringlengths
3
311
language
stringclasses
30 values
license
stringclasses
15 values
size
int64
3
1.01M
/* * ************************************************************************************* * Copyright (C) 2006-2015 EsperTech, Inc. All rights reserved. * * http://www.espertech.com/esper * * http://www.espertech.com * * ---------------------------------------------------------------------------------- * * The software in this package is published under the terms of the GPL license * * a copy of which has been included with this distribution in the license.txt file. * * ************************************************************************************* */ package com.espertech.esper.regression.client; import com.espertech.esper.client.UpdateListener; import com.espertech.esper.client.EventBean; import com.espertech.esper.collection.Pair; import java.util.List; import java.util.Collections; import java.util.ArrayList; public class SupportListenerSleeping implements UpdateListener { private List<Pair<Long, EventBean[]>> newEvents = Collections.synchronizedList(new ArrayList<Pair<Long, EventBean[]>>()); private final long sleepTime; public SupportListenerSleeping(long sleepTime) { this.sleepTime = sleepTime; } public void update(EventBean[] newData, EventBean[] oldEvents) { long time = System.nanoTime(); newEvents.add(new Pair<Long, EventBean[]>(time, newData)); try { Thread.sleep(sleepTime); } catch (InterruptedException e) { throw new RuntimeException(e); } } public List<Pair<Long, EventBean[]>> getNewEvents() { return newEvents; } }
b-cuts/esper
esper/src/test/java/com/espertech/esper/regression/client/SupportListenerSleeping.java
Java
gpl-2.0
1,771
/* * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package com.oracle.graal.replacements.nodes; import static com.oracle.graal.graph.UnsafeAccess.*; import com.oracle.graal.api.meta.*; import com.oracle.graal.nodes.*; import com.oracle.graal.nodes.extended.*; import com.oracle.graal.nodes.spi.*; import com.oracle.graal.nodes.type.*; /** * A special purpose store node that differs from {@link UnsafeStoreNode} in that it is not a * {@link StateSplit} and takes a computed address instead of an object. 
*/ public class DirectStoreNode extends FixedWithNextNode implements LIRLowerable { @Input private ValueNode address; @Input private ValueNode value; private final Kind kind; public DirectStoreNode(ValueNode address, ValueNode value, Kind kind) { super(StampFactory.forVoid()); this.address = address; this.value = value; this.kind = kind; } @Override public void generate(LIRGeneratorTool gen) { Value v = gen.operand(value); gen.emitStore(kind, gen.operand(address), v, null); } /* * The kind of the store is provided explicitly in these intrinsics because it is not always * possible to determine the kind from the given value during compilation (because stack kinds * are used). */ @SuppressWarnings("unused") @NodeIntrinsic public static void store(long address, boolean value, @ConstantNodeParameter Kind kind) { int b = value ? 1 : 0; unsafe.putByte(address, (byte) b); } @SuppressWarnings("unused") @NodeIntrinsic public static void store(long address, byte value, @ConstantNodeParameter Kind kind) { unsafe.putByte(address, value); } @SuppressWarnings("unused") @NodeIntrinsic public static void store(long address, short value, @ConstantNodeParameter Kind kind) { unsafe.putShort(address, value); } @SuppressWarnings("unused") @NodeIntrinsic public static void store(long address, char value, @ConstantNodeParameter Kind kind) { unsafe.putChar(address, value); } @SuppressWarnings("unused") @NodeIntrinsic public static void store(long address, int value, @ConstantNodeParameter Kind kind) { unsafe.putInt(address, value); } @SuppressWarnings("unused") @NodeIntrinsic public static void store(long address, long value, @ConstantNodeParameter Kind kind) { unsafe.putLong(address, value); } @SuppressWarnings("unused") @NodeIntrinsic public static void store(long address, float value, @ConstantNodeParameter Kind kind) { unsafe.putFloat(address, value); } @SuppressWarnings("unused") @NodeIntrinsic public static void store(long address, double value, @ConstantNodeParameter Kind kind) { 
unsafe.putDouble(address, value); } }
arodchen/MaxSim
graal/graal/com.oracle.graal.replacements/src/com/oracle/graal/replacements/nodes/DirectStoreNode.java
Java
gpl-2.0
3,861
/* Gpredict: Real-time satellite tracking and orbit prediction program Copyright (C) 2001-2010 Alexandru Csete, OZ9AEC. Authors: Alexandru Csete <oz9aec@gmail.com> Comments, questions and bugreports should be submitted via http://sourceforge.net/projects/gpredict/ More details can be found at the project home page: http://gpredict.oz9aec.net/ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, visit http://www.fsf.org/ */ #ifdef HAVE_CONFIG_H #include <build-config.h> #endif #include <glib/gi18n.h> #include <gtk/gtk.h> #include "compat.h" #include "gtk-sat-tree.h" #include "sat-log.h" static void gtk_sat_tree_class_init(GtkSatTreeClass * class); static void gtk_sat_tree_init(GtkSatTree * sat_tree); static void gtk_sat_tree_destroy(GtkObject * object); static GtkTreeModel *create_and_fill_model(guint flags); static void column_toggled(GtkCellRendererToggle * cell, gchar * path_str, gpointer data); static gint scan_tle_file(const gchar * path, GtkTreeStore * store, GtkTreeIter * node); static gboolean check_and_select_sat(GtkTreeModel * model, GtkTreePath * path, GtkTreeIter * iter, gpointer data); static gboolean uncheck_sat(GtkTreeModel * model, GtkTreePath * path, GtkTreeIter * iter, gpointer data); static gint compare_func(GtkTreeModel * model, GtkTreeIter * a, GtkTreeIter * b, gpointer userdata); static void expand_cb(GtkWidget * button, gpointer tree); static void collapse_cb(GtkWidget * button, gpointer tree); static GtkVBoxClass *parent_class = 
NULL; GType gtk_sat_tree_get_type() { static GType gtk_sat_tree_type = 0; if (!gtk_sat_tree_type) { static const GTypeInfo gtk_sat_tree_info = { sizeof(GtkSatTreeClass), NULL, /* base_init */ NULL, /* base_finalize */ (GClassInitFunc) gtk_sat_tree_class_init, NULL, /* class_finalize */ NULL, /* class_data */ sizeof(GtkSatTree), 1, /* n_preallocs */ (GInstanceInitFunc) gtk_sat_tree_init, NULL }; gtk_sat_tree_type = g_type_register_static(GTK_TYPE_VBOX, "GtkSatTree", &gtk_sat_tree_info, 0); } return gtk_sat_tree_type; } static void gtk_sat_tree_class_init(GtkSatTreeClass * class) { GObjectClass *gobject_class; GtkObjectClass *object_class; GtkWidgetClass *widget_class; GtkContainerClass *container_class; gobject_class = G_OBJECT_CLASS(class); object_class = (GtkObjectClass *) class; widget_class = (GtkWidgetClass *) class; container_class = (GtkContainerClass *) class; parent_class = g_type_class_peek_parent(class); object_class->destroy = gtk_sat_tree_destroy; } static void gtk_sat_tree_init(GtkSatTree * sat_tree) { (void)sat_tree; } static void gtk_sat_tree_destroy(GtkObject * object) { GtkSatTree *sat_tree = GTK_SAT_TREE(object); /* clear list of selected satellites */ /* crashes on 2. instance: g_slist_free (sat_tree->selection); */ guint n, i; gpointer data; n = g_slist_length(sat_tree->selection); for (i = 0; i < n; i++) { /* get the first element and delete it */ data = g_slist_nth_data(sat_tree->selection, 0); sat_tree->selection = g_slist_remove(sat_tree->selection, data); } (*GTK_OBJECT_CLASS(parent_class)->destroy) (object); } /** * Create a new GtkSatTree widget * * @param flags Flags indicating which columns should be visible * (see gtk_sat_tree_flag_t) * @return A GtkSatTree widget. 
*/ GtkWidget *gtk_sat_tree_new(guint flags) { GtkWidget *widget; GtkSatTree *sat_tree; GtkTreeModel *model; GtkCellRenderer *renderer; GtkTreeViewColumn *column; GtkWidget *hbox; GtkWidget *expbut; GtkWidget *colbut; if (!flags) flags = GTK_SAT_TREE_DEFAULT_FLAGS; widget = g_object_new(GTK_TYPE_SAT_TREE, NULL); sat_tree = GTK_SAT_TREE(widget); sat_tree->flags = flags; /* create list and model */ sat_tree->tree = gtk_tree_view_new(); gtk_tree_view_set_rules_hint(GTK_TREE_VIEW(sat_tree->tree), TRUE); model = create_and_fill_model(flags); gtk_tree_view_set_model(GTK_TREE_VIEW(sat_tree->tree), model); g_object_unref(model); /* sort the tree by name */ gtk_tree_sortable_set_sort_func(GTK_TREE_SORTABLE(model), GTK_SAT_TREE_COL_NAME, compare_func, NULL, NULL); gtk_tree_sortable_set_sort_column_id(GTK_TREE_SORTABLE(model), GTK_SAT_TREE_COL_NAME, GTK_SORT_ASCENDING); /* create tree view columns */ /* label column */ renderer = gtk_cell_renderer_text_new(); column = gtk_tree_view_column_new_with_attributes(_("Satellite"), renderer, "text", GTK_SAT_TREE_COL_NAME, NULL); gtk_tree_view_insert_column(GTK_TREE_VIEW(sat_tree->tree), column, -1); if (!(flags & GTK_SAT_TREE_FLAG_NAME)) gtk_tree_view_column_set_visible(column, FALSE); /* catalogue number */ renderer = gtk_cell_renderer_text_new(); column = gtk_tree_view_column_new_with_attributes(_("Catnum"), renderer, "text", GTK_SAT_TREE_COL_CATNUM, "visible", GTK_SAT_TREE_COL_VIS, NULL); gtk_tree_view_insert_column(GTK_TREE_VIEW(sat_tree->tree), column, -1); if (!(flags & GTK_SAT_TREE_FLAG_CATNUM)) gtk_tree_view_column_set_visible(column, FALSE); /* epoch */ renderer = gtk_cell_renderer_text_new(); column = gtk_tree_view_column_new_with_attributes(_("Epoch"), renderer, "text", GTK_SAT_TREE_COL_EPOCH, "visible", GTK_SAT_TREE_COL_VIS, NULL); gtk_tree_view_insert_column(GTK_TREE_VIEW(sat_tree->tree), column, -1); if (!(flags & GTK_SAT_TREE_FLAG_EPOCH)) gtk_tree_view_column_set_visible(column, FALSE); /* checkbox column */ renderer = 
gtk_cell_renderer_toggle_new(); sat_tree->handler_id = g_signal_connect(renderer, "toggled", G_CALLBACK(column_toggled), widget); column = gtk_tree_view_column_new_with_attributes(_("Selected"), renderer, "active", GTK_SAT_TREE_COL_SEL, "visible", GTK_SAT_TREE_COL_VIS, NULL); gtk_tree_view_append_column(GTK_TREE_VIEW(sat_tree->tree), column); gtk_tree_view_column_set_alignment(column, 0.5); if (!(flags & GTK_SAT_TREE_FLAG_SEL)) gtk_tree_view_column_set_visible(column, FALSE); /* scrolled window */ GTK_SAT_TREE(widget)->swin = gtk_scrolled_window_new(NULL, NULL); gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW (GTK_SAT_TREE(widget)->swin), GTK_POLICY_NEVER, GTK_POLICY_AUTOMATIC); gtk_container_add(GTK_CONTAINER(GTK_SAT_TREE(widget)->swin), GTK_SAT_TREE(widget)->tree); //gtk_container_add (GTK_CONTAINER (widget), GTK_SAT_TREE (widget)->swin); gtk_box_pack_start(GTK_BOX(widget), GTK_SAT_TREE(widget)->swin, TRUE, TRUE, 0); /* expand and collabse buttons */ expbut = gtk_button_new_with_label(_("Expand")); gtk_widget_set_tooltip_text(expbut, _ ("Expand all nodes in the tree to make it searchable")); g_signal_connect(expbut, "clicked", G_CALLBACK(expand_cb), sat_tree); colbut = gtk_button_new_with_label(_("Collapse")); gtk_widget_set_tooltip_text(colbut, _("Collapse all nodes in the tree")); g_signal_connect(colbut, "clicked", G_CALLBACK(collapse_cb), sat_tree); hbox = gtk_hbutton_box_new(); gtk_button_box_set_layout(GTK_BUTTON_BOX(hbox), GTK_BUTTONBOX_START); gtk_box_pack_start(GTK_BOX(hbox), expbut, FALSE, TRUE, 0); gtk_box_pack_start(GTK_BOX(hbox), colbut, FALSE, TRUE, 0); gtk_box_pack_start(GTK_BOX(widget), hbox, FALSE, FALSE, 5); gtk_widget_show_all(widget); /* initialise selection */ GTK_SAT_TREE(widget)->selection = NULL; return widget; } /** FIXME: flags not needed here */ static GtkTreeModel *create_and_fill_model(guint flags) { GtkTreeStore *store; /* the list store data structure */ GtkTreeIter node; /* new top level node added to the tree store */ GDir 
*dir; gchar *dirname; gchar *path; gchar *nodename; gchar **buffv; const gchar *fname; guint num = 0; (void)flags; /* create a new tree store */ store = gtk_tree_store_new(GTK_SAT_TREE_COL_NUM, G_TYPE_STRING, // name G_TYPE_INT, // catnum G_TYPE_STRING, // epoch G_TYPE_BOOLEAN, // selected G_TYPE_BOOLEAN // visible ); dirname = g_strconcat(g_get_home_dir(), G_DIR_SEPARATOR_S, ".gpredict2", G_DIR_SEPARATOR_S, "tle", NULL); sat_log_log(SAT_LOG_LEVEL_DEBUG, _("%s:%d: Directory is: %s"), __FILE__, __LINE__, dirname); dir = g_dir_open(dirname, 0, NULL); /* no tle files */ if (!dir) { sat_log_log(SAT_LOG_LEVEL_ERROR, _("%s:%d: No .tle files found in %s."), __FILE__, __LINE__, dirname); g_free(dirname); return GTK_TREE_MODEL(store);; } /* Scan data directory for .tle files. For each file scan through the file and add entry to the tree. */ while ((fname = g_dir_read_name(dir))) { if (g_str_has_suffix(fname, ".tle")) { buffv = g_strsplit(fname, ".tle", 0); nodename = g_strdup(buffv[0]); nodename[0] = g_ascii_toupper(nodename[0]); /* create a new top level node in the tree */ gtk_tree_store_append(store, &node, NULL); gtk_tree_store_set(store, &node, GTK_SAT_TREE_COL_NAME, nodename, GTK_SAT_TREE_COL_VIS, FALSE, -1); /* build full path til file and sweep it for sats */ path = g_strconcat(dirname, G_DIR_SEPARATOR_S, fname, NULL); num = scan_tle_file(path, store, &node); g_free(path); g_free(nodename); g_strfreev(buffv); sat_log_log(SAT_LOG_LEVEL_INFO, _("%s:%d: Read %d sats from %s "), __FILE__, __LINE__, num, fname); } } g_dir_close(dir); g_free(dirname); return GTK_TREE_MODEL(store); } /** * Scan .tle file and add satellites to GtkTreeStore. * * @param path Full path of the tle file. * @param store The GtkTreeStore to store the satellites into. * @param node The parent node for the satellites * @return The number of satellites that have been read into the tree. 
*/ static gint scan_tle_file(const gchar * path, GtkTreeStore * store, GtkTreeIter * node) { guint i = 0; guint j; GIOChannel *tlefile; GError *error = NULL; GtkTreeIter sat_iter; gchar *line; gsize length; gchar catstr[6]; gchar *satnam; guint catnum; /* open IO channel and read 3 lines at a time */ tlefile = g_io_channel_new_file(path, "r", &error); if (error != NULL) { sat_log_log(SAT_LOG_LEVEL_ERROR, _("%s:%d: Failed to open %s (%s)"), __FILE__, __LINE__, path, error->message); g_clear_error(&error); } else if (tlefile) { /*** FIXME: More error handling please */ while (g_io_channel_read_line(tlefile, &line, &length, NULL, NULL) != G_IO_STATUS_EOF) { /* satellite name can be found in the first line */ satnam = g_strdup(line); g_strchomp(satnam); /* free allocated line */ g_free(line); /* extract catnum from second line; index 2..6 */ g_io_channel_read_line(tlefile, &line, &length, NULL, NULL); for (j = 2; j < 7; j++) { catstr[j - 2] = line[j]; } catstr[5] = '\0'; catnum = (guint) g_ascii_strtod(catstr, NULL); /* insert satnam and catnum */ gtk_tree_store_append(store, &sat_iter, node); gtk_tree_store_set(store, &sat_iter, GTK_SAT_TREE_COL_NAME, satnam, GTK_SAT_TREE_COL_CATNUM, catnum, GTK_SAT_TREE_COL_SEL, FALSE, GTK_SAT_TREE_COL_VIS, TRUE, -1); g_free(satnam); g_free(line); /* read the third line */ g_io_channel_read_line(tlefile, &line, &length, NULL, NULL); g_free(line); i++; } /* close IO chanel; don't care about status */ g_io_channel_shutdown(tlefile, TRUE, NULL); g_io_channel_unref(tlefile); } return i; } /** * Manage toggle signals. * * @param cell cell. * @param path_str Path string. * @param data Pointer to the GtkSatTree widget. * * This function is called when the user toggles the visibility for a column. * It will add or remove the toggled satellite from the list of selected sats. 
*/ static void column_toggled(GtkCellRendererToggle * cell, gchar * path_str, gpointer data) { GtkSatTree *sat_tree = GTK_SAT_TREE(data); GtkTreeModel *model = gtk_tree_view_get_model(GTK_TREE_VIEW(sat_tree->tree)); GtkTreePath *path = gtk_tree_path_new_from_string(path_str); GtkTreeIter iter; gboolean toggle_item; guint catnum; (void)cell; /* get toggled iter */ gtk_tree_model_get_iter(model, &iter, path); gtk_tree_model_get(model, &iter, GTK_SAT_TREE_COL_CATNUM, &catnum, GTK_SAT_TREE_COL_SEL, &toggle_item, -1); /* do something with the value */ toggle_item ^= 1; if (toggle_item) { /* only append if sat not already in list */ if (!g_slist_find(sat_tree->selection, GUINT_TO_POINTER(catnum))) { sat_tree->selection = g_slist_append(sat_tree->selection, GUINT_TO_POINTER(catnum)); sat_log_log(SAT_LOG_LEVEL_DEBUG, _("%s:%d: Satellite %d selected."), __FILE__, __LINE__, catnum); /* Scan the tree for other instances of this sat. For example is CUTE-1.7 present in both AMATEUR and CUBESAT. We will need access to both the sat_tree and the catnum in the foreach callback, so we attach catnum as data to the sat_tree */ g_object_set_data(G_OBJECT(sat_tree), "tmp", GUINT_TO_POINTER(catnum)); /* find the satellite in the tree */ gtk_tree_model_foreach(model, check_and_select_sat, sat_tree); } else { sat_log_log(SAT_LOG_LEVEL_INFO, _("%s:%d: Satellite %d already selected; skip..."), __FILE__, __LINE__, catnum); } } else { sat_tree->selection = g_slist_remove(sat_tree->selection, GUINT_TO_POINTER(catnum)); sat_log_log(SAT_LOG_LEVEL_DEBUG, _("%s:%d: Satellite %d de-selected."), __FILE__, __LINE__, catnum); /* Scan the tree for other instances of this sat. For example is CUTE-1.7 present in both AMATEUR and CUBESAT. 
We will need access to both the sat_tree and the catnum in the foreach callback, so we attach catnum as data to the sat_tree */ g_object_set_data(G_OBJECT(sat_tree), "tmp", GUINT_TO_POINTER(catnum)); /* find the satellite in the tree */ gtk_tree_model_foreach(model, uncheck_sat, sat_tree); } /* set new value */ gtk_tree_store_set(GTK_TREE_STORE(model), &iter, GTK_SAT_TREE_COL_SEL, toggle_item, -1); gtk_tree_path_free(path); } /** * Select a satellite in the GtkSatTree. * * @param sat_tree The GtkSatTree widget. * @param catnum Catalogue number of satellite to be selected. */ void gtk_sat_tree_select(GtkSatTree * sat_tree, guint catnum) { /* sanity check */ if ((sat_tree == NULL) || !IS_GTK_SAT_TREE(sat_tree)) { sat_log_log(SAT_LOG_LEVEL_ERROR, _("%s: Invalid GtkSatTree!"), __func__); return; } if (!g_slist_find(sat_tree->selection, GUINT_TO_POINTER(catnum))) { GtkTreeModel *model = gtk_tree_view_get_model(GTK_TREE_VIEW(sat_tree->tree)); /* we will need access to both the sat_tree and the catnum in the foreach callback, so we attach catnum as data to the sat_tree */ g_object_set_data(G_OBJECT(sat_tree), "tmp", GUINT_TO_POINTER(catnum)); /* find the satellite in the tree */ gtk_tree_model_foreach(model, check_and_select_sat, sat_tree); } else { /* else do nothing since the sat is already selected */ sat_log_log(SAT_LOG_LEVEL_INFO, _("%s: Satellite %d already selected; skip..."), __func__, catnum); } } /** * Foreach callback for checking and selecting a satellite. * * @param model The GtkTreeModel. * @param path The GtkTreePath of the current item. * @param iter The GtkTreeIter of the current item. * @param data Pointer to the GtkSatTree structure. * @return Alway FALSE to let the for-each run to till end. * * This function is used as foreach-callback in the gtk_sat_tree_select function. * The purpoise of the function is to set the check box to chacked state and add * the satellite in question to the selection list. 
The catalogue number of the * satellite to be selected is attached as data to the GtkSatTree (key = tmp). * * The function is also used in the column_toggled callback function with the * purpose of locating and selecting other instances of the satellite than the * one, on which the user clicked on (meaning: some sats can be found in several * TLE file and we want to chak them all, not just the clicked instance). */ static gboolean check_and_select_sat(GtkTreeModel * model, GtkTreePath * path, GtkTreeIter * iter, gpointer data) { GtkSatTree *sat_tree = GTK_SAT_TREE(data); guint cat1, cat2; (void)path; cat1 = GPOINTER_TO_UINT(g_object_get_data(G_OBJECT(data), "tmp")); gtk_tree_model_get(model, iter, GTK_SAT_TREE_COL_CATNUM, &cat2, -1); if (cat1 == cat2) { /* we have a match */ gtk_tree_store_set(GTK_TREE_STORE(model), iter, GTK_SAT_TREE_COL_SEL, TRUE, -1); /* only append if sat not already in list */ if (!g_slist_find(sat_tree->selection, GUINT_TO_POINTER(cat1))) { sat_tree->selection = g_slist_append(sat_tree->selection, GUINT_TO_POINTER(cat1)); sat_log_log(SAT_LOG_LEVEL_DEBUG, _("%s:%d: Satellite %d selected."), __FILE__, __LINE__, cat1); } else { sat_log_log(SAT_LOG_LEVEL_INFO, _("%s:%d: Satellite %d already selected; skip..."), __FILE__, __LINE__, cat1); } /* If we return TRUE here, the foreach would terminate. We let it run to allow GtkSatTree to mark all instances of sat the satellite (some sats may be present in two or more .tle files. */ //return TRUE; } /* continue in order to catch ALL instances of sat */ return FALSE; } /** * Foreach callback for unchecking a satellite. * * @param model The GtkTreeModel. * @param path The GtkTreePath of the current item. * @param iter The GtkTreeIter of the current item. * @param data Pointer to the GtkSatTree structure. * @return Alway FALSE to let the for-each run to till end. * * This function is very similar to the check_and_select callback except that it * is used only to uncheck a deselected satellite. 
*/ static gboolean uncheck_sat(GtkTreeModel * model, GtkTreePath * path, GtkTreeIter * iter, gpointer data) { guint cat1, cat2; (void)path; cat1 = GPOINTER_TO_UINT(g_object_get_data(G_OBJECT(data), "tmp")); gtk_tree_model_get(model, iter, GTK_SAT_TREE_COL_CATNUM, &cat2, -1); if (cat1 == cat2) { /* we have a match */ gtk_tree_store_set(GTK_TREE_STORE(model), iter, GTK_SAT_TREE_COL_SEL, FALSE, -1); } /* continue in order to catch ALL instances of sat */ return FALSE; } /** * Get list of selected satellites. * * @param sat_tree The GtkSatTree * @param size Return location for number of selected sats. * @return A newly allocated array containing the selected satellites or * NULL if no satellites are selected. * * The returned array should be g_freed when no longer needed. */ guint *gtk_sat_tree_get_selected(GtkSatTree * sat_tree, gsize * size) { guint i; gsize s; guint *ret; /* sanity check */ if ((sat_tree == NULL) || !IS_GTK_SAT_TREE(sat_tree)) { sat_log_log(SAT_LOG_LEVEL_ERROR, _("%s: Invalid GtkSatTree!"), __func__); return NULL; } /* parameter are ok */ s = g_slist_length(sat_tree->selection); if (s < 1) { sat_log_log(SAT_LOG_LEVEL_DEBUG, _("%s: There are no satellites selected => NULL."), __func__); *size = 0; return NULL; } ret = (guint *) g_try_malloc(s * sizeof(guint)); for (i = 0; i < s; i++) { ret[i] = GPOINTER_TO_UINT(g_slist_nth_data(sat_tree->selection, i)); } if (size != NULL) *size = s; return ret; } /** * Compare two rows of the GtkSatTree. * * @param model The tree model of the GtkSatTree. * @param a The first row. * @param b The second row. * @param userdata Not used. * * This function is used by the sorting algorithm to compare two rows of the * GtkSatTree widget. The unctions works by comparing the character strings * in the name column. 
*/ static gint compare_func(GtkTreeModel * model, GtkTreeIter * a, GtkTreeIter * b, gpointer userdata) { gchar *sat1, *sat2; gint ret = 0; (void)userdata; gtk_tree_model_get(model, a, GTK_SAT_TREE_COL_NAME, &sat1, -1); gtk_tree_model_get(model, b, GTK_SAT_TREE_COL_NAME, &sat2, -1); ret = g_ascii_strcasecmp(sat1, sat2); g_free(sat1); g_free(sat2); return ret; } /** * Expand all nodes in the GtkSatTree. * * @param button The GtkButton that received the signal. * @param tree Pointer to the GtkSatTree widget. * * This function expands all rows in the tree view in order to make it * searchable. */ static void expand_cb(GtkWidget * button, gpointer tree) { (void)button; gtk_tree_view_expand_all(GTK_TREE_VIEW(GTK_SAT_TREE(tree)->tree)); } /** * Collapse all nodes in the GtkSatTree. * * @param button The GtkButton that received the signal. * @param tree Pointer to the GtkSatTree widget. * * This function collapses all rows in the tree view. */ static void collapse_cb(GtkWidget * button, gpointer tree) { (void)button; gtk_tree_view_collapse_all(GTK_TREE_VIEW(GTK_SAT_TREE(tree)->tree)); }
csete/gpredict
src/gtk-sat-tree.c
C
gpl-2.0
26,119
<div class="menu-submenu"> <div class="panel panel-submenu"> <div class="panel-heading"><h6 class="panel-title">Veja também</h6></div> <div class="panel-body vertical"> <?php wp_nav_menu( array( 'theme_location' => 'menu-interno', 'container_class' => 'menu' )); ?> </div> </div> </div>
diraol/participacao-tema
menu-vertical.php
PHP
gpl-2.0
300
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * * Copyright (c) 2011 * * ChangeLog * * */ #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/keyboard.h> #include <linux/ioport.h> #include <asm/irq.h> #include <asm/io.h> #include <linux/timer.h> #ifdef CONFIG_HAS_EARLYSUSPEND #include <linux/pm.h> #include <linux/earlysuspend.h> #endif //#define KEY_DEBUG //#define KEY_DEBUG_LEVEL2 #define PRINT_SUSPEND_INFO #define KEY_MAX_CNT (13) #define KEY_BASSADDRESS (0xf1c22800) #define LRADC_CTRL (0x00) #define LRADC_INTC (0x04) #define LRADC_INT_STA (0x08) #define LRADC_DATA0 (0x0c) #define LRADC_DATA1 (0x10) #define FIRST_CONCERT_DLY (2<<24) #define CHAN (0x3) #define ADC_CHAN_SELECT (CHAN<<22) #define LRADC_KEY_MODE (0) #define KEY_MODE_SELECT (LRADC_KEY_MODE<<12) #define LEVELB_VOL (0<<4) #define LRADC_HOLD_EN (1<<6) #define LRADC_SAMPLE_32HZ (3<<2) #define LRADC_SAMPLE_62HZ (2<<2) #define LRADC_SAMPLE_125HZ (1<<2) #define LRADC_SAMPLE_250HZ (0<<2) #define LRADC_EN (1<<0) #define LRADC_ADC1_UP_EN (1<<12) #define LRADC_ADC1_DOWN_EN (1<<9) #define LRADC_ADC1_DATA_EN (1<<8) #define LRADC_ADC0_UP_EN (1<<4) #define LRADC_ADC0_DOWN_EN (1<<1) #define LRADC_ADC0_DATA_EN (1<<0) #define LRADC_ADC1_UPPEND (1<<12) #define LRADC_ADC1_DOWNPEND (1<<9) #define LRADC_ADC1_DATAPEND (1<<8) #define 
LRADC_ADC0_UPPEND (1<<4) #define LRADC_ADC0_DOWNPEND (1<<1) #define LRADC_ADC0_DATAPEND (1<<0) #define EVB //#define CUSTUM #define ONE_CHANNEL #define MODE_0V2 //#define MODE_0V15 //#define TWO_CHANNEL #ifdef MODE_0V2 //standard of key maping //0.2V mode #define REPORT_START_NUM (5) #define REPORT_KEY_LOW_LIMIT_COUNT (3) #define MAX_CYCLE_COUNTER (100) //#define REPORT_REPEAT_KEY_BY_INPUT_CORE //#define REPORT_REPEAT_KEY_FROM_HW #define INITIAL_VALUE (0Xff) static unsigned char keypad_mapindex[64] = { 0,0,0,0,0,0,0,0, //key 1, 8¸ö£¬ 0-7 1,1,1,1,1,1,1, //key 2, 7¸ö£¬ 8-14 2,2,2,2,2,2,2, //key 3, 7¸ö£¬ 15-21 3,3,3,3,3,3, //key 4, 6¸ö£¬ 22-27 4,4,4,4,4,4, //key 5, 6¸ö£¬ 28-33 5,5,5,5,5,5, //key 6, 6¸ö£¬ 34-39 6,6,6,6,6,6,6,6,6,6, //key 7, 10¸ö£¬40-49 7,7,7,7,7,7,7,7,7,7,7,7,7,7 //key 8, 17¸ö£¬50-63 }; #endif #ifdef MODE_0V15 //0.15V mode static unsigned char keypad_mapindex[64] = { 0,0,0, //key1 1,1,1,1,1, //key2 2,2,2,2,2, 3,3,3,3, 4,4,4,4,4, 5,5,5,5,5, 6,6,6,6,6, 7,7,7,7, 8,8,8,8,8, 9,9,9,9,9, 10,10,10,10, 11,11,11,11, 12,12,12,12,12,12,12,12,12,12 //key13 }; #endif #ifdef EVB static unsigned int sun4i_scankeycodes[KEY_MAX_CNT]= { [0 ] = KEY_VOLUMEUP, [1 ] = KEY_VOLUMEDOWN, [2 ] = KEY_MENU, [3 ] = KEY_SEARCH, [4 ] = KEY_HOME, [5 ] = KEY_ESC, [6 ] = KEY_ENTER, [7 ] = KEY_RESERVED, [8 ] = KEY_RESERVED, [9 ] = KEY_RESERVED, [10] = KEY_RESERVED, [11] = KEY_RESERVED, [12] = KEY_RESERVED, }; #endif #ifdef CONFIG_HAS_EARLYSUSPEND struct sun4i_keyboard_data { struct early_suspend early_suspend; }; #endif static volatile unsigned int key_val; static struct input_dev *sun4ikbd_dev; static unsigned char scancode; static unsigned char key_cnt = 0; static unsigned char cycle_buffer[REPORT_START_NUM] = {0}; static unsigned char transfer_code = INITIAL_VALUE; #ifdef CONFIG_HAS_EARLYSUSPEND static struct sun4i_keyboard_data *keyboard_data; #endif //Í£ÓÃÉ豸 #ifdef CONFIG_HAS_EARLYSUSPEND static void sun4i_keyboard_suspend(struct early_suspend *h) { /*int ret; struct 
sun4i_keyboard_data *ts = container_of(h, struct sun4i_keyboard_data, early_suspend); */ #ifdef PRINT_SUSPEND_INFO printk("enter earlysuspend: sun4i_keyboard_suspend. \n"); #endif writel(0,KEY_BASSADDRESS + LRADC_CTRL); return ; } //ÖØÐ»½ÐÑ static void sun4i_keyboard_resume(struct early_suspend *h) { /*int ret; struct sun4i_keyboard_data *ts = container_of(h, struct sun4i_keyboard_data, early_suspend); */ #ifdef PRINT_SUSPEND_INFO printk("enter laterresume: sun4i_keyboard_resume. \n"); #endif writel(FIRST_CONCERT_DLY|LEVELB_VOL|KEY_MODE_SELECT|LRADC_HOLD_EN|ADC_CHAN_SELECT|LRADC_SAMPLE_62HZ|LRADC_EN,KEY_BASSADDRESS + LRADC_CTRL); return ; } #else #endif static irqreturn_t sun4i_isr_key(int irq, void *dummy) { unsigned int reg_val; int judge_flag = 0; int loop = 0; #ifdef KEY_DEBUG printk("Key Interrupt\n"); #endif reg_val = readl(KEY_BASSADDRESS + LRADC_INT_STA); //writel(reg_val,KEY_BASSADDRESS + LRADC_INT_STA); if(reg_val&LRADC_ADC0_DOWNPEND) { #ifdef KEY_DEBUG printk("key down\n"); #endif } if(reg_val&LRADC_ADC0_DATAPEND) { key_val = readl(KEY_BASSADDRESS+LRADC_DATA0); if(key_val < 0x3f) { /*key_val = readl(KEY_BASSADDRESS + LRADC_DATA0); cancode = keypad_mapindex[key_val&0x3f]; #ifdef KEY_DEBUG printk("raw data: key_val == %u , scancode == %u \n", key_val, scancode); #endif */ cycle_buffer[key_cnt%REPORT_START_NUM] = key_val&0x3f; if((key_cnt + 1) < REPORT_START_NUM) { //do not report key message }else{ //scancode = cycle_buffer[(key_cnt-2)%REPORT_START_NUM]; if(cycle_buffer[(key_cnt - REPORT_START_NUM + 1)%REPORT_START_NUM] \ == cycle_buffer[(key_cnt - REPORT_START_NUM + 2)%REPORT_START_NUM]) { key_val = cycle_buffer[(key_cnt - REPORT_START_NUM + 1)%REPORT_START_NUM]; scancode = keypad_mapindex[key_val&0x3f]; judge_flag = 1; } if((!judge_flag) && cycle_buffer[(key_cnt - REPORT_START_NUM + 4)%REPORT_START_NUM] \ == cycle_buffer[(key_cnt - REPORT_START_NUM + 5)%REPORT_START_NUM]) { key_val = cycle_buffer[(key_cnt - REPORT_START_NUM + 5)%REPORT_START_NUM]; 
scancode = keypad_mapindex[key_val&0x3f]; judge_flag = 1; } if(1 == judge_flag) { #ifdef KEY_DEBUG_LEVEL2 printk("report data: key_val :%8d transfer_code: %8d , scancode: %8d\n",\ key_val, transfer_code, scancode); #endif if(transfer_code == scancode){ //report repeat key value #ifdef REPORT_REPEAT_KEY_FROM_HW input_report_key(sun4ikbd_dev, sun4i_scankeycodes[scancode], 0); input_sync(sun4ikbd_dev); input_report_key(sun4ikbd_dev, sun4i_scankeycodes[scancode], 1); input_sync(sun4ikbd_dev); #else //do not report key value #endif }else if(INITIAL_VALUE != transfer_code){ //report previous key value up signal + report current key value down input_report_key(sun4ikbd_dev, sun4i_scankeycodes[transfer_code], 0); input_sync(sun4ikbd_dev); input_report_key(sun4ikbd_dev, sun4i_scankeycodes[scancode], 1); input_sync(sun4ikbd_dev); transfer_code = scancode; }else{ //INITIAL_VALUE == transfer_code, first time to report key event input_report_key(sun4ikbd_dev, sun4i_scankeycodes[scancode], 1); input_sync(sun4ikbd_dev); transfer_code = scancode; } } } key_cnt++; if(key_cnt > 2 * MAX_CYCLE_COUNTER ){ key_cnt -= MAX_CYCLE_COUNTER; } } } if(reg_val&LRADC_ADC0_UPPEND) { if(key_cnt > REPORT_START_NUM) { if(INITIAL_VALUE != transfer_code) { #ifdef KEY_DEBUG_LEVEL2 printk("report data: key_val :%8d transfer_code: %8d \n",key_val, transfer_code); #endif input_report_key(sun4ikbd_dev, sun4i_scankeycodes[transfer_code], 0); input_sync(sun4ikbd_dev); } }else if((key_cnt + 1) >= REPORT_KEY_LOW_LIMIT_COUNT){ //rely on hardware first_delay work, need to be verified! 
if(cycle_buffer[0] == cycle_buffer[1]){ key_val = cycle_buffer[0]; scancode = keypad_mapindex[key_val&0x3f]; #ifdef KEY_DEBUG_LEVEL2 printk("report data: key_val :%8d scancode: %8d \n",key_val, scancode); #endif input_report_key(sun4ikbd_dev, sun4i_scankeycodes[scancode], 1); input_sync(sun4ikbd_dev); input_report_key(sun4ikbd_dev, sun4i_scankeycodes[scancode], 0); input_sync(sun4ikbd_dev); } } #ifdef KEY_DEBUG printk("key up \n"); #endif key_cnt = 0; judge_flag = 0; transfer_code = INITIAL_VALUE; for(loop = 0; loop < REPORT_START_NUM; loop++) { cycle_buffer[loop] = 0; } } writel(reg_val,KEY_BASSADDRESS + LRADC_INT_STA); return IRQ_HANDLED; } static int __init sun4ikbd_init(void) { int i; int err =0; #ifdef KEY_DEBUG printk("sun4ikbd_init \n"); #endif sun4ikbd_dev = input_allocate_device(); if (!sun4ikbd_dev) { printk(KERN_ERR "sun4ikbd: not enough memory for input device\n"); err = -ENOMEM; goto fail1; } sun4ikbd_dev->name = "sun4i-keyboard"; sun4ikbd_dev->phys = "sun4ikbd/input0"; sun4ikbd_dev->id.bustype = BUS_HOST; sun4ikbd_dev->id.vendor = 0x0001; sun4ikbd_dev->id.product = 0x0001; sun4ikbd_dev->id.version = 0x0100; #ifdef REPORT_REPEAT_KEY_BY_INPUT_CORE sun4ikbd_dev->evbit[0] = BIT_MASK(EV_KEY)|BIT_MASK(EV_REP); printk("REPORT_REPEAT_KEY_BY_INPUT_CORE is defined, support report repeat key value. \n"); #else sun4ikbd_dev->evbit[0] = BIT_MASK(EV_KEY); #endif for (i = 0; i < KEY_MAX_CNT; i++) set_bit(sun4i_scankeycodes[i], sun4ikbd_dev->keybit); #ifdef ONE_CHANNEL writel(LRADC_ADC0_DOWN_EN|LRADC_ADC0_UP_EN|LRADC_ADC0_DATA_EN,KEY_BASSADDRESS + LRADC_INTC); writel(FIRST_CONCERT_DLY|LEVELB_VOL|KEY_MODE_SELECT|LRADC_HOLD_EN|ADC_CHAN_SELECT|LRADC_SAMPLE_62HZ|LRADC_EN,KEY_BASSADDRESS + LRADC_CTRL); //writel(FIRST_CONCERT_DLY|LEVELB_VOL|KEY_MODE_SELECT|ADC_CHAN_SELECT|LRADC_SAMPLE_62HZ|LRADC_EN,KEY_BASSADDRESS + LRADC_CTRL); #else #endif if (request_irq(SW_INT_IRQNO_LRADC, sun4i_isr_key, 0, "sun4ikbd", NULL)){ err = -EBUSY; printk("request irq failure. 
\n"); goto fail2; } err = input_register_device(sun4ikbd_dev); if (err) goto fail3; #ifdef CONFIG_HAS_EARLYSUSPEND printk("==register_early_suspend =\n"); keyboard_data = kzalloc(sizeof(*keyboard_data), GFP_KERNEL); if (keyboard_data == NULL) { err = -ENOMEM; goto err_alloc_data_failed; } keyboard_data->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 3; keyboard_data->early_suspend.suspend = sun4i_keyboard_suspend; keyboard_data->early_suspend.resume = sun4i_keyboard_resume; register_early_suspend(&keyboard_data->early_suspend); #endif return 0; #ifdef CONFIG_HAS_EARLYSUSPEND err_alloc_data_failed: #endif fail3: free_irq(SW_INT_IRQNO_LRADC, sun4i_isr_key); fail2: input_free_device(sun4ikbd_dev); fail1: ; #ifdef KEY_DEBUG printk("sun4ikbd_init failed. \n"); #endif return err; } static void __exit sun4ikbd_exit(void) { #ifdef CONFIG_HAS_EARLYSUSPEND unregister_early_suspend(&keyboard_data->early_suspend); #endif free_irq(SW_INT_IRQNO_LRADC, sun4i_isr_key); input_unregister_device(sun4ikbd_dev); } module_init(sun4ikbd_init); module_exit(sun4ikbd_exit); MODULE_AUTHOR(" <@>"); MODULE_DESCRIPTION("sun4i-keyboard driver"); MODULE_LICENSE("GPL");
zhangdaiyan/my-allwinner-ics_linux-3.0
drivers/input/keyboard/sun4i-keyboard.c
C
gpl-2.0
11,632
Fabricator(:user) do name 'Bruce Wayne' username { sequence(:username) { |i| "bruce#{i}" } } email { sequence(:email) { |i| "bruce#{i}@wayne.com" } } password 'myawesomepassword' trust_level TrustLevel.levels[:basic] bio_raw "I'm batman!" end Fabricator(:coding_horror, from: :user) do name 'Coding Horror' username 'CodingHorror' email 'jeff@somewhere.com' password 'mymoreawesomepassword' end Fabricator(:evil_trout, from: :user) do name 'Evil Trout' username 'eviltrout' email 'eviltrout@somewhere.com' password 'imafish' end Fabricator(:walter_white, from: :user) do name 'Walter White' username 'heisenberg' email 'wwhite@bluemeth.com' password 'letscook' end Fabricator(:moderator, from: :user) do name 'A. Moderator' username 'moderator' email 'moderator@discourse.org' moderator true end Fabricator(:admin, from: :user) do name 'Anne Admin' username 'anne' email 'anne@discourse.org' admin true end Fabricator(:another_admin, from: :user) do name 'Anne Admin the 2nd' username 'anne2' email 'anne2@discourse.org' admin true end
bounscale/discourse-heroku
spec/fabricators/user_fabricator.rb
Ruby
gpl-2.0
1,103
/* SSL support. Copyright (C) 2000 Free Software Foundation, Inc. Contributed by Christian Fraenkel. This file is part of GNU Wget. GNU Wget is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. GNU Wget is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Wget; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. In addition, as a special exception, the Free Software Foundation gives permission to link the code of its release of Wget with the OpenSSL project's "OpenSSL" library (or with modified versions of it that use the same license as the "OpenSSL" library), and distribute the linked executables. You must obey the GNU General Public License in all respects for all of the code used other than "OpenSSL". If you modify this file, you may extend this exception to your version of the file, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version. 
*/ #include <config.h> #ifdef HAVE_SSL #include <assert.h> #include <errno.h> #ifdef HAVE_UNISTD_H # include <unistd.h> #endif #ifdef HAVE_STRING_H # include <string.h> #else # include <strings.h> #endif #include <openssl/bio.h> #include <openssl/crypto.h> #include <openssl/x509.h> #include <openssl/ssl.h> #include <openssl/err.h> #include <openssl/pem.h> #include <openssl/rand.h> #include "wget.h" #include "utils.h" #include "connect.h" #include "url.h" #ifndef errno extern int errno; #endif void ssl_init_prng (void) { /* It is likely that older versions of OpenSSL will fail on non-Linux machines because this code is unable to seed the PRNG on older versions of the library. */ #if SSLEAY_VERSION_NUMBER >= 0x00905100 char rand_file[256]; int maxrand = 500; /* First, seed from a file specified by the user. This will be $RANDFILE, if set, or ~/.rnd. */ RAND_file_name (rand_file, sizeof (rand_file)); if (rand_file) /* Seed at most 16k (value borrowed from curl) from random file. */ RAND_load_file (rand_file, 16384); if (RAND_status ()) return; /* Get random data from EGD if opt.sslegdsock was set. */ if (opt.sslegdsock && *opt.sslegdsock) RAND_egd (opt.sslegdsock); if (RAND_status ()) return; #ifdef WINDOWS /* Under Windows, we can try to seed the PRNG using screen content. This may or may not work, depending on whether we'll calling Wget interactively. */ RAND_screen (); if (RAND_status ()) return; #endif /* Still not enough randomness, presumably because neither random file nor EGD have been available. Use the stupidest possible method -- seed OpenSSL's PRNG with the system's PRNG. This is insecure in the cryptographic sense, but people who care about security will use /dev/random or their own source of randomness anyway. 
*/ while (RAND_status () == 0 && maxrand-- > 0) { unsigned char rnd = random_number (256); RAND_seed (&rnd, sizeof (rnd)); } if (RAND_status () == 0) { logprintf (LOG_NOTQUIET, _("Could not seed OpenSSL PRNG; disabling SSL.\n")); scheme_disable (SCHEME_HTTPS); } #endif /* SSLEAY_VERSION_NUMBER >= 0x00905100 */ } int verify_callback (int ok, X509_STORE_CTX *ctx) { char *s, buf[256]; s = X509_NAME_oneline (X509_get_subject_name (ctx->current_cert), buf, 256); if (ok == 0) { switch (ctx->error) { case X509_V_ERR_CERT_NOT_YET_VALID: case X509_V_ERR_CERT_HAS_EXPIRED: /* This mean the CERT is not valid !!! */ ok = 0; break; case X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT: /* Unsure if we should handle that this way */ ok = 1; break; } } return ok; } /* pass all ssl errors to DEBUGP returns the number of printed errors */ int ssl_printerrors (void) { int ocerr = 0; unsigned long curerr = 0; char errbuff[1024]; memset(errbuff, 0, sizeof(errbuff)); while ( 0 != (curerr = ERR_get_error ())) { DEBUGP (("OpenSSL: %s\n", ERR_error_string (curerr, errbuff))); ++ocerr; } return ocerr; } /* Creates a SSL Context and sets some defaults for it */ uerr_t init_ssl (SSL_CTX **ctx) { SSL_METHOD *meth = NULL; int verify; int can_validate; SSL_library_init (); SSL_load_error_strings (); SSLeay_add_all_algorithms (); SSLeay_add_ssl_algorithms (); switch (opt.sslprotocol) { default: meth = SSLv23_client_method (); break; case 1 : meth = SSLv2_client_method (); break; case 2 : meth = SSLv3_client_method (); break; case 3 : meth = TLSv1_client_method (); break; } if (meth == NULL) { ssl_printerrors (); return SSLERRCTXCREATE; } *ctx = SSL_CTX_new (meth); if (meth == NULL) { ssl_printerrors (); return SSLERRCTXCREATE; } /* Can we validate the server Cert ? 
*/ if (opt.sslcadir != NULL || opt.sslcafile != NULL) { SSL_CTX_load_verify_locations (*ctx, opt.sslcafile, opt.sslcadir); can_validate = 1; } else { can_validate = 0; } if (!opt.sslcheckcert) { /* check cert but ignore error, do not break handshake on error */ verify = SSL_VERIFY_NONE; } else { if (!can_validate) { logprintf (LOG_NOTQUIET, "Warrining validation of Server Cert not possible!\n"); verify = SSL_VERIFY_NONE; } else { /* break handshake if server cert is not valid but allow NO-Cert mode */ verify = SSL_VERIFY_PEER; } } SSL_CTX_set_verify (*ctx, verify, verify_callback); if (opt.sslcertfile != NULL || opt.sslcertkey != NULL) { int ssl_cert_type; if (!opt.sslcerttype) ssl_cert_type = SSL_FILETYPE_PEM; else ssl_cert_type = SSL_FILETYPE_ASN1; if (opt.sslcertkey == NULL) opt.sslcertkey = opt.sslcertfile; if (opt.sslcertfile == NULL) opt.sslcertfile = opt.sslcertkey; if (SSL_CTX_use_certificate_file (*ctx, opt.sslcertfile, ssl_cert_type) <= 0) { ssl_printerrors (); return SSLERRCERTFILE; } if (SSL_CTX_use_PrivateKey_file (*ctx, opt.sslcertkey , ssl_cert_type) <= 0) { ssl_printerrors (); return SSLERRCERTKEY; } } return 0; /* Succeded */ } void shutdown_ssl (SSL* con) { if (con == NULL) return; if (0==SSL_shutdown (con)) SSL_shutdown (con); SSL_free (con); } /* Sets up a SSL structure and performs the handshake on fd Returns 0 if everything went right Returns 1 if something went wrong ----- TODO: More exit codes */ int connect_ssl (SSL **con, SSL_CTX *ctx, int fd) { if (NULL == (*con = SSL_new (ctx))) { ssl_printerrors (); return 1; } if (!SSL_set_fd (*con, fd)) { ssl_printerrors (); return 1; } SSL_set_connect_state (*con); switch (SSL_connect (*con)) { case 1 : return (*con)->state != SSL_ST_OK; default: ssl_printerrors (); shutdown_ssl (*con); *con = NULL; return 1; case 0 : ssl_printerrors (); SSL_free (*con); *con = NULL; return 1; } return 0; } void free_ssl_ctx (SSL_CTX * ctx) { SSL_CTX_free (ctx); } /* SSL version of iread. 
Only exchanged read for SSL_read Read at most LEN bytes from FD, storing them to BUF. */ int ssl_iread (SSL *con, char *buf, int len) { int res, fd; BIO_get_fd (con->rbio, &fd); #ifdef HAVE_SELECT if (opt.read_timeout && !SSL_pending (con)) if (select_fd (fd, opt.read_timeout, 0) <= 0) return -1; #endif do res = SSL_read (con, buf, len); while (res == -1 && errno == EINTR); return res; } /* SSL version of iwrite. Only exchanged write for SSL_write Write LEN bytes from BUF to FD. */ int ssl_iwrite (SSL *con, char *buf, int len) { int res = 0, fd; BIO_get_fd (con->rbio, &fd); /* `write' may write less than LEN bytes, thus the outward loop keeps trying it until all was written, or an error occurred. The inner loop is reserved for the usual EINTR f*kage, and the innermost loop deals with the same during select(). */ while (len > 0) { #ifdef HAVE_SELECT if (opt.read_timeout) if (select_fd (fd, opt.read_timeout, 1) <= 0) return -1; #endif do res = SSL_write (con, buf, len); while (res == -1 && errno == EINTR); if (res <= 0) break; buf += res; len -= res; } return res; } #endif /* HAVE_SSL */
ipwndev/DSLinux-Mirror
user/wget/src/gen_sslfunc.c
C
gpl-2.0
8,789
--ユーフォロイド・ファイター function c32752319.initial_effect(c) --fusion material c:EnableReviveLimit() aux.AddFusionProcCodeFun(c,7602840,aux.FilterBoolFunction(Card.IsRace,RACE_WARRIOR),1,false,false) --atk local e1=Effect.CreateEffect(c) e1:SetType(EFFECT_TYPE_SINGLE+EFFECT_TYPE_CONTINUOUS) e1:SetCode(EVENT_SPSUMMON_SUCCESS) e1:SetCondition(c32752319.atkcon) e1:SetOperation(c32752319.atkop) c:RegisterEffect(e1) end function c32752319.atkcon(e,tp,eg,ep,ev,re,r,rp) return e:GetHandler():IsSummonType(SUMMON_TYPE_FUSION) end function c32752319.atkop(e,tp,eg,ep,ev,re,r,rp) local c=e:GetHandler() local g=c:GetMaterial() local tc=g:GetFirst() local atk=0 while tc do local catk=tc:GetBaseAttack() if catk<0 then catk=0 end atk=atk+catk tc=g:GetNext() end if atk~=0 then local e1=Effect.CreateEffect(c) e1:SetType(EFFECT_TYPE_SINGLE) e1:SetCode(EFFECT_SET_BASE_ATTACK) e1:SetValue(atk) e1:SetReset(RESET_EVENT+RESETS_STANDARD+RESET_DISABLE) c:RegisterEffect(e1) local e2=e1:Clone() e2:SetCode(EFFECT_SET_BASE_DEFENSE) c:RegisterEffect(e2) end end
nekrozar/ygopro-scripts
c32752319.lua
Lua
gpl-2.0
1,110
<?php /* * Title : Pinpoint Booking System WordPress Plugin (PRO) * Version : 2.1.1 * File : views/addons/views-backend-addons.php * File Version : 1.0.2 * Created / Last Modified : 25 August 2015 * Author : Dot on Paper * Copyright : © 2012 Dot on Paper * Website : http://www.dotonpaper.net * Description : Back end addons views class. */ if (!class_exists('DOPBSPViewsBackEndAddons')){ class DOPBSPViewsBackEndAddons extends DOPBSPViewsBackEnd{ /* * Constructor */ function __construct(){ } /* * Returns addons template. * * @param args (array): function arguments * * @return addons HTML page */ function template($args = array()){ global $DOPBSP; $this->getTranslation(); ?> <div class="wrap DOPBSP-admin"> <!-- Header --> <?php $this->displayHeader($DOPBSP->text('TITLE'), $DOPBSP->text('ADDONS_TITLE')); ?> <!-- Content --> <div class="dopbsp-main dopbsp-hidden"> <table class="dopbsp-content-wrapper"> <colgroup> <col id="DOPBSP-col-column1" class="dopbsp-column1 dopbsp-addons" /> <col id="DOPBSP-col-column-separator1" class="dopbsp-separator" /> <col id="DOPBSP-col-column2" class="dopbsp-column2" /> </colgroup> <tbody> <tr> <td class="dopbsp-column" id="DOPBSP-column1"> <div class="dopbsp-column-header"> <a href="javascript:void(0)" class="dopbsp-button dopbsp-help"><span class="dopbsp-info dopbsp-help"><?php echo $DOPBSP->text('ADDONS_HELP'); ?></span></a> </div> <div class="dopbsp-column-content">&nbsp;</div> </td> <td id="DOPBSP-column-separator1" class="dopbsp-separator"></td> <td id="DOPBSP-column2" class="dopbsp-column"> <div class="dopbsp-column-header">&nbsp;</div> <div class="dopbsp-column-content">&nbsp;</div> </td> </tr> </tbody> </table> </div> </div> <?php } } }
Ishtiaque-Shaad/xyz
wp-content/plugins/dopbsp/views/addons/views-backend-addons.php
PHP
gpl-2.0
2,638
/* * Copyright (c) 2009, Microsoft Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, see <http://www.gnu.org/licenses/>. * * Authors: * Haiyang Zhang <haiyangz@microsoft.com> * Hank Janssen <hjanssen@microsoft.com> */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/highmem.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/if_vlan.h> #include <linux/nls.h> #include <linux/vmalloc.h> #include <linux/rtnetlink.h> #include "hyperv_net.h" static void rndis_set_multicast(struct work_struct *w); #define RNDIS_EXT_LEN PAGE_SIZE struct rndis_request { struct list_head list_ent; struct completion wait_event; struct rndis_message response_msg; /* * The buffer for extended info after the RNDIS response message. It's * referenced based on the data offset in the RNDIS message. Its size * is enough for current needs, and should be sufficient for the near * future. */ u8 response_ext[RNDIS_EXT_LEN]; /* Simplify allocation by having a netvsc packet inline */ struct hv_netvsc_packet pkt; struct rndis_message request_msg; /* * The buffer for the extended info after the RNDIS request message. * It is referenced and sized in a similar way as response_ext. 
*/ u8 request_ext[RNDIS_EXT_LEN]; }; static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = { 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa }; static struct rndis_device *get_rndis_device(void) { struct rndis_device *device; device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL); if (!device) return NULL; spin_lock_init(&device->request_lock); INIT_LIST_HEAD(&device->req_list); INIT_WORK(&device->mcast_work, rndis_set_multicast); device->state = RNDIS_DEV_UNINITIALIZED; return device; } static struct rndis_request *get_rndis_request(struct rndis_device *dev, u32 msg_type, u32 msg_len) { struct rndis_request *request; struct rndis_message *rndis_msg; struct rndis_set_request *set; unsigned long flags; request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL); if (!request) return NULL; init_completion(&request->wait_event); rndis_msg = &request->request_msg; rndis_msg->ndis_msg_type = msg_type; rndis_msg->msg_len = msg_len; request->pkt.q_idx = 0; /* * Set the request id. 
This field is always after the rndis header for * request/response packet types so we just used the SetRequest as a * template */ set = &rndis_msg->msg.set_req; set->req_id = atomic_inc_return(&dev->new_req_id); /* Add to the request list */ spin_lock_irqsave(&dev->request_lock, flags); list_add_tail(&request->list_ent, &dev->req_list); spin_unlock_irqrestore(&dev->request_lock, flags); return request; } static void put_rndis_request(struct rndis_device *dev, struct rndis_request *req) { unsigned long flags; spin_lock_irqsave(&dev->request_lock, flags); list_del(&req->list_ent); spin_unlock_irqrestore(&dev->request_lock, flags); kfree(req); } static void dump_rndis_message(struct net_device *netdev, const struct rndis_message *rndis_msg) { switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, " "data offset %u data len %u, # oob %u, " "oob offset %u, oob len %u, pkt offset %u, " "pkt len %u\n", rndis_msg->msg_len, rndis_msg->msg.pkt.data_offset, rndis_msg->msg.pkt.data_len, rndis_msg->msg.pkt.num_oob_data_elements, rndis_msg->msg.pkt.oob_data_offset, rndis_msg->msg.pkt.oob_data_len, rndis_msg->msg.pkt.per_pkt_info_offset, rndis_msg->msg.pkt.per_pkt_info_len); break; case RNDIS_MSG_INIT_C: netdev_dbg(netdev, "RNDIS_MSG_INIT_C " "(len %u, id 0x%x, status 0x%x, major %d, minor %d, " "device flags %d, max xfer size 0x%x, max pkts %u, " "pkt aligned %u)\n", rndis_msg->msg_len, rndis_msg->msg.init_complete.req_id, rndis_msg->msg.init_complete.status, rndis_msg->msg.init_complete.major_ver, rndis_msg->msg.init_complete.minor_ver, rndis_msg->msg.init_complete.dev_flags, rndis_msg->msg.init_complete.max_xfer_size, rndis_msg->msg.init_complete. max_pkt_per_msg, rndis_msg->msg.init_complete. 
pkt_alignment_factor); break; case RNDIS_MSG_QUERY_C: netdev_dbg(netdev, "RNDIS_MSG_QUERY_C " "(len %u, id 0x%x, status 0x%x, buf len %u, " "buf offset %u)\n", rndis_msg->msg_len, rndis_msg->msg.query_complete.req_id, rndis_msg->msg.query_complete.status, rndis_msg->msg.query_complete. info_buflen, rndis_msg->msg.query_complete. info_buf_offset); break; case RNDIS_MSG_SET_C: netdev_dbg(netdev, "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n", rndis_msg->msg_len, rndis_msg->msg.set_complete.req_id, rndis_msg->msg.set_complete.status); break; case RNDIS_MSG_INDICATE: netdev_dbg(netdev, "RNDIS_MSG_INDICATE " "(len %u, status 0x%x, buf len %u, buf offset %u)\n", rndis_msg->msg_len, rndis_msg->msg.indicate_status.status, rndis_msg->msg.indicate_status.status_buflen, rndis_msg->msg.indicate_status.status_buf_offset); break; default: netdev_dbg(netdev, "0x%x (len %u)\n", rndis_msg->ndis_msg_type, rndis_msg->msg_len); break; } } static int rndis_filter_send_request(struct rndis_device *dev, struct rndis_request *req) { struct hv_netvsc_packet *packet; struct hv_page_buffer page_buf[2]; struct hv_page_buffer *pb = page_buf; int ret; /* Setup the packet to send it */ packet = &req->pkt; packet->total_data_buflen = req->request_msg.msg_len; packet->page_buf_cnt = 1; pb[0].pfn = virt_to_phys(&req->request_msg) >> PAGE_SHIFT; pb[0].len = req->request_msg.msg_len; pb[0].offset = (unsigned long)&req->request_msg & (PAGE_SIZE - 1); /* Add one page_buf when request_msg crossing page boundary */ if (pb[0].offset + pb[0].len > PAGE_SIZE) { packet->page_buf_cnt++; pb[0].len = PAGE_SIZE - pb[0].offset; pb[1].pfn = virt_to_phys((void *)&req->request_msg + pb[0].len) >> PAGE_SHIFT; pb[1].offset = 0; pb[1].len = req->request_msg.msg_len - pb[0].len; } rcu_read_lock_bh(); ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL); rcu_read_unlock_bh(); return ret; } static void rndis_set_link_state(struct rndis_device *rdev, struct rndis_request *request) { u32 link_status; struct 
rndis_query_complete *query_complete; query_complete = &request->response_msg.msg.query_complete; if (query_complete->status == RNDIS_STATUS_SUCCESS && query_complete->info_buflen == sizeof(u32)) { memcpy(&link_status, (void *)((unsigned long)query_complete + query_complete->info_buf_offset), sizeof(u32)); rdev->link_state = link_status != 0; } } static void rndis_filter_receive_response(struct net_device *ndev, struct netvsc_device *nvdev, const struct rndis_message *resp) { struct rndis_device *dev = nvdev->extension; struct rndis_request *request = NULL; bool found = false; unsigned long flags; /* This should never happen, it means control message * response received after device removed. */ if (dev->state == RNDIS_DEV_UNINITIALIZED) { netdev_err(ndev, "got rndis message uninitialized\n"); return; } spin_lock_irqsave(&dev->request_lock, flags); list_for_each_entry(request, &dev->req_list, list_ent) { /* * All request/response message contains RequestId as the 1st * field */ if (request->request_msg.msg.init_req.req_id == resp->msg.init_complete.req_id) { found = true; break; } } spin_unlock_irqrestore(&dev->request_lock, flags); if (found) { if (resp->msg_len <= sizeof(struct rndis_message) + RNDIS_EXT_LEN) { memcpy(&request->response_msg, resp, resp->msg_len); if (request->request_msg.ndis_msg_type == RNDIS_MSG_QUERY && request->request_msg.msg. query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS) rndis_set_link_state(dev, request); } else { netdev_err(ndev, "rndis response buffer overflow " "detected (size %u max %zu)\n", resp->msg_len, sizeof(struct rndis_message)); if (resp->ndis_msg_type == RNDIS_MSG_RESET_C) { /* does not have a request id field */ request->response_msg.msg.reset_complete. status = RNDIS_STATUS_BUFFER_OVERFLOW; } else { request->response_msg.msg. 
init_complete.status = RNDIS_STATUS_BUFFER_OVERFLOW; } } complete(&request->wait_event); } else { netdev_err(ndev, "no rndis request found for this response " "(id 0x%x res type 0x%x)\n", resp->msg.init_complete.req_id, resp->ndis_msg_type); } } /* * Get the Per-Packet-Info with the specified type * return NULL if not found. */ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type) { struct rndis_per_packet_info *ppi; int len; if (rpkt->per_pkt_info_offset == 0) return NULL; ppi = (struct rndis_per_packet_info *)((ulong)rpkt + rpkt->per_pkt_info_offset); len = rpkt->per_pkt_info_len; while (len > 0) { if (ppi->type == type) return (void *)((ulong)ppi + ppi->ppi_offset); len -= ppi->size; ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size); } return NULL; } static int rndis_filter_receive_data(struct net_device *ndev, struct netvsc_device *nvdev, struct rndis_message *msg, struct vmbus_channel *channel, void *data, u32 data_buflen) { struct rndis_packet *rndis_pkt = &msg->msg.pkt; const struct ndis_tcp_ip_checksum_info *csum_info; const struct ndis_pkt_8021q_info *vlan; u32 data_offset; /* Remove the rndis header and pass it back up the stack */ data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset; data_buflen -= data_offset; /* * Make sure we got a valid RNDIS message, now total_data_buflen * should be the data packet size plus the trailer padding size */ if (unlikely(data_buflen < rndis_pkt->data_len)) { netdev_err(ndev, "rndis message buffer " "overflow detected (got %u, min %u)" "...dropping this message!\n", data_buflen, rndis_pkt->data_len); return NVSP_STAT_FAIL; } vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO); /* * Remove the rndis trailer padding from rndis packet message * rndis_pkt->data_len tell us the real data length, we only copy * the data packet to the stack, without the rndis trailer padding */ data = (void *)((unsigned long)data + data_offset); csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO); return 
netvsc_recv_callback(ndev, nvdev, channel, data, rndis_pkt->data_len, csum_info, vlan); } int rndis_filter_receive(struct net_device *ndev, struct netvsc_device *net_dev, struct vmbus_channel *channel, void *data, u32 buflen) { struct net_device_context *net_device_ctx = netdev_priv(ndev); struct rndis_message *rndis_msg = data; if (netif_msg_rx_status(net_device_ctx)) dump_rndis_message(ndev, rndis_msg); switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: return rndis_filter_receive_data(ndev, net_dev, rndis_msg, channel, data, buflen); case RNDIS_MSG_INIT_C: case RNDIS_MSG_QUERY_C: case RNDIS_MSG_SET_C: /* completion msgs */ rndis_filter_receive_response(ndev, net_dev, rndis_msg); break; case RNDIS_MSG_INDICATE: /* notification msgs */ netvsc_linkstatus_callback(ndev, rndis_msg); break; default: netdev_err(ndev, "unhandled rndis message (type %u len %u)\n", rndis_msg->ndis_msg_type, rndis_msg->msg_len); break; } return 0; } static int rndis_filter_query_device(struct rndis_device *dev, struct netvsc_device *nvdev, u32 oid, void *result, u32 *result_size) { struct rndis_request *request; u32 inresult_size = *result_size; struct rndis_query_request *query; struct rndis_query_complete *query_complete; int ret = 0; if (!result) return -EINVAL; *result_size = 0; request = get_rndis_request(dev, RNDIS_MSG_QUERY, RNDIS_MESSAGE_SIZE(struct rndis_query_request)); if (!request) { ret = -ENOMEM; goto cleanup; } /* Setup the rndis query */ query = &request->request_msg.msg.query_req; query->oid = oid; query->info_buf_offset = sizeof(struct rndis_query_request); query->info_buflen = 0; query->dev_vc_handle = 0; if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) { struct ndis_offload *hwcaps; u32 nvsp_version = nvdev->nvsp_version; u8 ndis_rev; size_t size; if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) { ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3; size = NDIS_OFFLOAD_SIZE; } else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) { ndis_rev = 
NDIS_OFFLOAD_PARAMETERS_REVISION_2; size = NDIS_OFFLOAD_SIZE_6_1; } else { ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1; size = NDIS_OFFLOAD_SIZE_6_0; } request->request_msg.msg_len += size; query->info_buflen = size; hwcaps = (struct ndis_offload *) ((unsigned long)query + query->info_buf_offset); hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD; hwcaps->header.revision = ndis_rev; hwcaps->header.size = size; } else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) { struct ndis_recv_scale_cap *cap; request->request_msg.msg_len += sizeof(struct ndis_recv_scale_cap); query->info_buflen = sizeof(struct ndis_recv_scale_cap); cap = (struct ndis_recv_scale_cap *)((unsigned long)query + query->info_buf_offset); cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES; cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2; cap->hdr.size = sizeof(struct ndis_recv_scale_cap); } ret = rndis_filter_send_request(dev, request); if (ret != 0) goto cleanup; wait_for_completion(&request->wait_event); /* Copy the response back */ query_complete = &request->response_msg.msg.query_complete; if (query_complete->info_buflen > inresult_size) { ret = -1; goto cleanup; } memcpy(result, (void *)((unsigned long)query_complete + query_complete->info_buf_offset), query_complete->info_buflen); *result_size = query_complete->info_buflen; cleanup: if (request) put_rndis_request(dev, request); return ret; } /* Get the hardware offload capabilities */ static int rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device, struct ndis_offload *caps) { u32 caps_len = sizeof(*caps); int ret; memset(caps, 0, sizeof(*caps)); ret = rndis_filter_query_device(dev, net_device, OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES, caps, &caps_len); if (ret) return ret; if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) { netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n", caps->header.type); return -EINVAL; } if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) { netdev_warn(dev->ndev, "invalid 
NDIS objrev %x\n", caps->header.revision); return -EINVAL; } if (caps->header.size > caps_len || caps->header.size < NDIS_OFFLOAD_SIZE_6_0) { netdev_warn(dev->ndev, "invalid NDIS objsize %u, data size %u\n", caps->header.size, caps_len); return -EINVAL; } return 0; } static int rndis_filter_query_device_mac(struct rndis_device *dev, struct netvsc_device *net_device) { u32 size = ETH_ALEN; return rndis_filter_query_device(dev, net_device, RNDIS_OID_802_3_PERMANENT_ADDRESS, dev->hw_mac_adr, &size); } #define NWADR_STR "NetworkAddress" #define NWADR_STRLEN 14 int rndis_filter_set_device_mac(struct netvsc_device *nvdev, const char *mac) { struct rndis_device *rdev = nvdev->extension; struct rndis_request *request; struct rndis_set_request *set; struct rndis_config_parameter_info *cpi; wchar_t *cfg_nwadr, *cfg_mac; struct rndis_set_complete *set_complete; char macstr[2*ETH_ALEN+1]; u32 extlen = sizeof(struct rndis_config_parameter_info) + 2*NWADR_STRLEN + 4*ETH_ALEN; int ret; request = get_rndis_request(rdev, RNDIS_MSG_SET, RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen); if (!request) return -ENOMEM; set = &request->request_msg.msg.set_req; set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER; set->info_buflen = extlen; set->info_buf_offset = sizeof(struct rndis_set_request); set->dev_vc_handle = 0; cpi = (struct rndis_config_parameter_info *)((ulong)set + set->info_buf_offset); cpi->parameter_name_offset = sizeof(struct rndis_config_parameter_info); /* Multiply by 2 because host needs 2 bytes (utf16) for each char */ cpi->parameter_name_length = 2*NWADR_STRLEN; cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING; cpi->parameter_value_offset = cpi->parameter_name_offset + cpi->parameter_name_length; /* Multiply by 4 because each MAC byte displayed as 2 utf16 chars */ cpi->parameter_value_length = 4*ETH_ALEN; cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset); cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset); ret = 
utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN, cfg_nwadr, NWADR_STRLEN); if (ret < 0) goto cleanup; snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac); ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN, cfg_mac, 2*ETH_ALEN); if (ret < 0) goto cleanup; ret = rndis_filter_send_request(rdev, request); if (ret != 0) goto cleanup; wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; if (set_complete->status != RNDIS_STATUS_SUCCESS) ret = -EIO; cleanup: put_rndis_request(rdev, request); return ret; } static int rndis_filter_set_offload_params(struct net_device *ndev, struct netvsc_device *nvdev, struct ndis_offload_params *req_offloads) { struct rndis_device *rdev = nvdev->extension; struct rndis_request *request; struct rndis_set_request *set; struct ndis_offload_params *offload_params; struct rndis_set_complete *set_complete; u32 extlen = sizeof(struct ndis_offload_params); int ret; u32 vsp_version = nvdev->nvsp_version; if (vsp_version <= NVSP_PROTOCOL_VERSION_4) { extlen = VERSION_4_OFFLOAD_SIZE; /* On NVSP_PROTOCOL_VERSION_4 and below, we do not support * UDP checksum offload. 
*/ req_offloads->udp_ip_v4_csum = 0; req_offloads->udp_ip_v6_csum = 0; } request = get_rndis_request(rdev, RNDIS_MSG_SET, RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen); if (!request) return -ENOMEM; set = &request->request_msg.msg.set_req; set->oid = OID_TCP_OFFLOAD_PARAMETERS; set->info_buflen = extlen; set->info_buf_offset = sizeof(struct rndis_set_request); set->dev_vc_handle = 0; offload_params = (struct ndis_offload_params *)((ulong)set + set->info_buf_offset); *offload_params = *req_offloads; offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT; offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3; offload_params->header.size = extlen; ret = rndis_filter_send_request(rdev, request); if (ret != 0) goto cleanup; wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; if (set_complete->status != RNDIS_STATUS_SUCCESS) { netdev_err(ndev, "Fail to set offload on host side:0x%x\n", set_complete->status); ret = -EINVAL; } cleanup: put_rndis_request(rdev, request); return ret; } int rndis_filter_set_rss_param(struct rndis_device *rdev, const u8 *rss_key) { struct net_device *ndev = rdev->ndev; struct rndis_request *request; struct rndis_set_request *set; struct rndis_set_complete *set_complete; u32 extlen = sizeof(struct ndis_recv_scale_param) + 4 * ITAB_NUM + NETVSC_HASH_KEYLEN; struct ndis_recv_scale_param *rssp; u32 *itab; u8 *keyp; int i, ret; request = get_rndis_request( rdev, RNDIS_MSG_SET, RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen); if (!request) return -ENOMEM; set = &request->request_msg.msg.set_req; set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS; set->info_buflen = extlen; set->info_buf_offset = sizeof(struct rndis_set_request); set->dev_vc_handle = 0; rssp = (struct ndis_recv_scale_param *)(set + 1); rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS; rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2; rssp->hdr.size = sizeof(struct ndis_recv_scale_param); rssp->flag = 0; 
rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 | NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_TCP_IPV6; rssp->indirect_tabsize = 4*ITAB_NUM; rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param); rssp->hashkey_size = NETVSC_HASH_KEYLEN; rssp->kashkey_offset = rssp->indirect_taboffset + rssp->indirect_tabsize; /* Set indirection table entries */ itab = (u32 *)(rssp + 1); for (i = 0; i < ITAB_NUM; i++) itab[i] = rdev->rx_table[i]; /* Set hask key values */ keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset); memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN); ret = rndis_filter_send_request(rdev, request); if (ret != 0) goto cleanup; wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; if (set_complete->status == RNDIS_STATUS_SUCCESS) memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN); else { netdev_err(ndev, "Fail to set RSS parameters:0x%x\n", set_complete->status); ret = -EINVAL; } cleanup: put_rndis_request(rdev, request); return ret; } static int rndis_filter_query_device_link_status(struct rndis_device *dev, struct netvsc_device *net_device) { u32 size = sizeof(u32); u32 link_status; return rndis_filter_query_device(dev, net_device, RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, &link_status, &size); } static int rndis_filter_query_link_speed(struct rndis_device *dev, struct netvsc_device *net_device) { u32 size = sizeof(u32); u32 link_speed; struct net_device_context *ndc; int ret; ret = rndis_filter_query_device(dev, net_device, RNDIS_OID_GEN_LINK_SPEED, &link_speed, &size); if (!ret) { ndc = netdev_priv(dev->ndev); /* The link speed reported from host is in 100bps unit, so * we convert it to Mbps here. 
*/ ndc->speed = link_speed / 10000; } return ret; } static int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) { struct rndis_request *request; struct rndis_set_request *set; int ret; if (dev->filter == new_filter) return 0; request = get_rndis_request(dev, RNDIS_MSG_SET, RNDIS_MESSAGE_SIZE(struct rndis_set_request) + sizeof(u32)); if (!request) return -ENOMEM; /* Setup the rndis set */ set = &request->request_msg.msg.set_req; set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER; set->info_buflen = sizeof(u32); set->info_buf_offset = sizeof(struct rndis_set_request); memcpy((void *)(unsigned long)set + sizeof(struct rndis_set_request), &new_filter, sizeof(u32)); ret = rndis_filter_send_request(dev, request); if (ret == 0) { wait_for_completion(&request->wait_event); dev->filter = new_filter; } put_rndis_request(dev, request); return ret; } static void rndis_set_multicast(struct work_struct *w) { struct rndis_device *rdev = container_of(w, struct rndis_device, mcast_work); u32 filter = NDIS_PACKET_TYPE_DIRECTED; unsigned int flags = rdev->ndev->flags; if (flags & IFF_PROMISC) { filter = NDIS_PACKET_TYPE_PROMISCUOUS; } else { if (flags & IFF_ALLMULTI) filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; if (flags & IFF_BROADCAST) filter |= NDIS_PACKET_TYPE_BROADCAST; } rndis_filter_set_packet_filter(rdev, filter); } void rndis_filter_update(struct netvsc_device *nvdev) { struct rndis_device *rdev = nvdev->extension; schedule_work(&rdev->mcast_work); } static int rndis_filter_init_device(struct rndis_device *dev, struct netvsc_device *nvdev) { struct rndis_request *request; struct rndis_initialize_request *init; struct rndis_initialize_complete *init_complete; u32 status; int ret; request = get_rndis_request(dev, RNDIS_MSG_INIT, RNDIS_MESSAGE_SIZE(struct rndis_initialize_request)); if (!request) { ret = -ENOMEM; goto cleanup; } /* Setup the rndis set */ init = &request->request_msg.msg.init_req; init->major_ver = RNDIS_MAJOR_VERSION; init->minor_ver = 
RNDIS_MINOR_VERSION; init->max_xfer_size = 0x4000; dev->state = RNDIS_DEV_INITIALIZING; ret = rndis_filter_send_request(dev, request); if (ret != 0) { dev->state = RNDIS_DEV_UNINITIALIZED; goto cleanup; } wait_for_completion(&request->wait_event); init_complete = &request->response_msg.msg.init_complete; status = init_complete->status; if (status == RNDIS_STATUS_SUCCESS) { dev->state = RNDIS_DEV_INITIALIZED; nvdev->max_pkt = init_complete->max_pkt_per_msg; nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor; ret = 0; } else { dev->state = RNDIS_DEV_UNINITIALIZED; ret = -EINVAL; } cleanup: if (request) put_rndis_request(dev, request); return ret; } static bool netvsc_device_idle(const struct netvsc_device *nvdev) { int i; for (i = 0; i < nvdev->num_chn; i++) { const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; if (nvchan->mrc.first != nvchan->mrc.next) return false; if (atomic_read(&nvchan->queue_sends) > 0) return false; } return true; } static void rndis_filter_halt_device(struct rndis_device *dev) { struct rndis_request *request; struct rndis_halt_request *halt; struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); /* Attempt to do a rndis device halt */ request = get_rndis_request(dev, RNDIS_MSG_HALT, RNDIS_MESSAGE_SIZE(struct rndis_halt_request)); if (!request) goto cleanup; /* Setup the rndis set */ halt = &request->request_msg.msg.halt_req; halt->req_id = atomic_inc_return(&dev->new_req_id); /* Ignore return since this msg is optional. 
*/ rndis_filter_send_request(dev, request); dev->state = RNDIS_DEV_UNINITIALIZED; cleanup: nvdev->destroy = true; /* Force flag to be ordered before waiting */ wmb(); /* Wait for all send completions */ wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev)); if (request) put_rndis_request(dev, request); } static int rndis_filter_open_device(struct rndis_device *dev) { int ret; if (dev->state != RNDIS_DEV_INITIALIZED) return 0; ret = rndis_filter_set_packet_filter(dev, NDIS_PACKET_TYPE_BROADCAST | NDIS_PACKET_TYPE_ALL_MULTICAST | NDIS_PACKET_TYPE_DIRECTED); if (ret == 0) dev->state = RNDIS_DEV_DATAINITIALIZED; return ret; } static int rndis_filter_close_device(struct rndis_device *dev) { int ret; if (dev->state != RNDIS_DEV_DATAINITIALIZED) return 0; /* Make sure rndis_set_multicast doesn't re-enable filter! */ cancel_work_sync(&dev->mcast_work); ret = rndis_filter_set_packet_filter(dev, 0); if (ret == -ENODEV) ret = 0; if (ret == 0) dev->state = RNDIS_DEV_INITIALIZED; return ret; } static void netvsc_sc_open(struct vmbus_channel *new_sc) { struct net_device *ndev = hv_get_drvdata(new_sc->primary_channel->device_obj); struct net_device_context *ndev_ctx = netdev_priv(ndev); struct netvsc_device *nvscdev; u16 chn_index = new_sc->offermsg.offer.sub_channel_index; struct netvsc_channel *nvchan; int ret; /* This is safe because this callback only happens when * new device is being setup and waiting on the channel_init_wait. 
*/ nvscdev = rcu_dereference_raw(ndev_ctx->nvdev); if (!nvscdev || chn_index >= nvscdev->num_chn) return; nvchan = nvscdev->chan_table + chn_index; /* Because the device uses NAPI, all the interrupt batching and * control is done via Net softirq, not the channel handling */ set_channel_read_mode(new_sc, HV_CALL_ISR); /* Set the channel before opening.*/ nvchan->channel = new_sc; ret = vmbus_open(new_sc, netvsc_ring_bytes, netvsc_ring_bytes, NULL, 0, netvsc_channel_cb, nvchan); if (ret == 0) napi_enable(&nvchan->napi); else netdev_notice(ndev, "sub channel open failed: %d\n", ret); if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn) wake_up(&nvscdev->subchan_open); } /* Open sub-channels after completing the handling of the device probe. * This breaks overlap of processing the host message for the * new primary channel with the initialization of sub-channels. */ void rndis_set_subchannel(struct work_struct *w) { struct netvsc_device *nvdev = container_of(w, struct netvsc_device, subchan_work); struct nvsp_message *init_packet = &nvdev->channel_init_pkt; struct net_device_context *ndev_ctx; struct rndis_device *rdev; struct net_device *ndev; struct hv_device *hv_dev; int i, ret; if (!rtnl_trylock()) { schedule_work(w); return; } rdev = nvdev->extension; if (!rdev) goto unlock; /* device was removed */ ndev = rdev->ndev; ndev_ctx = netdev_priv(ndev); hv_dev = ndev_ctx->device_ctx; memset(init_packet, 0, sizeof(struct nvsp_message)); init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE; init_packet->msg.v5_msg.subchn_req.num_subchannels = nvdev->num_chn - 1; ret = vmbus_sendpacket(hv_dev->channel, init_packet, sizeof(struct nvsp_message), (unsigned long)init_packet, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) { netdev_err(ndev, "sub channel allocate send failed: %d\n", ret); goto failed; } wait_for_completion(&nvdev->channel_init_wait); if 
(init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { netdev_err(ndev, "sub channel request failed\n"); goto failed; } nvdev->num_chn = 1 + init_packet->msg.v5_msg.subchn_comp.num_subchannels; /* wait for all sub channels to open */ wait_event(nvdev->subchan_open, atomic_read(&nvdev->open_chn) == nvdev->num_chn); /* ignore failues from setting rss parameters, still have channels */ rndis_filter_set_rss_param(rdev, netvsc_hash_key); netif_set_real_num_tx_queues(ndev, nvdev->num_chn); netif_set_real_num_rx_queues(ndev, nvdev->num_chn); for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) ndev_ctx->tx_table[i] = i % nvdev->num_chn; netif_device_attach(ndev); rtnl_unlock(); return; failed: /* fallback to only primary channel */ for (i = 1; i < nvdev->num_chn; i++) netif_napi_del(&nvdev->chan_table[i].napi); nvdev->max_chn = 1; nvdev->num_chn = 1; netif_device_attach(ndev); unlock: rtnl_unlock(); } static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, struct netvsc_device *nvdev) { struct net_device *net = rndis_device->ndev; struct net_device_context *net_device_ctx = netdev_priv(net); struct ndis_offload hwcaps; struct ndis_offload_params offloads; unsigned int gso_max_size = GSO_MAX_SIZE; int ret; /* Find HW offload capabilities */ ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps); if (ret != 0) return ret; /* A value of zero means "no change"; now turn on what we want. 
*/ memset(&offloads, 0, sizeof(struct ndis_offload_params)); /* Linux does not care about IP checksum, always does in kernel */ offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED; /* Reset previously set hw_features flags */ net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES; net_device_ctx->tx_checksum_mask = 0; /* Compute tx offload settings based on hw capabilities */ net->hw_features |= NETIF_F_RXCSUM; if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) { /* Can checksum TCP */ net->hw_features |= NETIF_F_IP_CSUM; net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP; offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) { offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; net->hw_features |= NETIF_F_TSO; if (hwcaps.lsov2.ip4_maxsz < gso_max_size) gso_max_size = hwcaps.lsov2.ip4_maxsz; } if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) { offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP; } } if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) { net->hw_features |= NETIF_F_IPV6_CSUM; offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP; if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) && (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) { offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; net->hw_features |= NETIF_F_TSO6; if (hwcaps.lsov2.ip6_maxsz < gso_max_size) gso_max_size = hwcaps.lsov2.ip6_maxsz; } if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) { offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED; net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP; } } /* In case some hw_features disappeared we need to remove them from * net->features list as they're no longer supported. 
*/ net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features; netif_set_gso_max_size(net, gso_max_size); ret = rndis_filter_set_offload_params(net, nvdev, &offloads); return ret; } struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, struct netvsc_device_info *device_info) { struct net_device *net = hv_get_drvdata(dev); struct netvsc_device *net_device; struct rndis_device *rndis_device; struct ndis_recv_scale_cap rsscap; u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); u32 mtu, size; u32 num_possible_rss_qs; int i, ret; rndis_device = get_rndis_device(); if (!rndis_device) return ERR_PTR(-ENODEV); /* Let the inner driver handle this first to create the netvsc channel * NOTE! Once the channel is created, we may get a receive callback * (RndisFilterOnReceive()) before this call is completed */ net_device = netvsc_device_add(dev, device_info); if (IS_ERR(net_device)) { kfree(rndis_device); return net_device; } /* Initialize the rndis device */ net_device->max_chn = 1; net_device->num_chn = 1; net_device->extension = rndis_device; rndis_device->ndev = net; /* Send the rndis initialization message */ ret = rndis_filter_init_device(rndis_device, net_device); if (ret != 0) goto err_dev_remv; /* Get the MTU from the host */ size = sizeof(u32); ret = rndis_filter_query_device(rndis_device, net_device, RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE, &mtu, &size); if (ret == 0 && size == sizeof(u32) && mtu < net->mtu) net->mtu = mtu; /* Get the mac address */ ret = rndis_filter_query_device_mac(rndis_device, net_device); if (ret != 0) goto err_dev_remv; memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN); /* Query and set hardware capabilities */ ret = rndis_netdev_set_hwcaps(rndis_device, net_device); if (ret != 0) goto err_dev_remv; rndis_filter_query_device_link_status(rndis_device, net_device); netdev_dbg(net, "Device MAC %pM link state %s\n", rndis_device->hw_mac_adr, rndis_device->link_state ? 
"down" : "up"); if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5) return net_device; rndis_filter_query_link_speed(rndis_device, net_device); /* vRSS setup */ memset(&rsscap, 0, rsscap_size); ret = rndis_filter_query_device(rndis_device, net_device, OID_GEN_RECEIVE_SCALE_CAPABILITIES, &rsscap, &rsscap_size); if (ret || rsscap.num_recv_que < 2) goto out; /* This guarantees that num_possible_rss_qs <= num_online_cpus */ num_possible_rss_qs = min_t(u32, num_online_cpus(), rsscap.num_recv_que); net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs); /* We will use the given number of channels if available. */ net_device->num_chn = min(net_device->max_chn, device_info->num_chn); for (i = 0; i < ITAB_NUM; i++) rndis_device->rx_table[i] = ethtool_rxfh_indir_default( i, net_device->num_chn); atomic_set(&net_device->open_chn, 1); vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); for (i = 1; i < net_device->num_chn; i++) { ret = netvsc_alloc_recv_comp_ring(net_device, i); if (ret) { while (--i != 0) vfree(net_device->chan_table[i].mrc.slots); goto out; } } for (i = 1; i < net_device->num_chn; i++) netif_napi_add(net, &net_device->chan_table[i].napi, netvsc_poll, NAPI_POLL_WEIGHT); if (net_device->num_chn > 1) schedule_work(&net_device->subchan_work); out: /* if unavailable, just proceed with one queue */ if (ret) { net_device->max_chn = 1; net_device->num_chn = 1; } /* No sub channels, device is ready */ if (net_device->num_chn == 1) netif_device_attach(net); return net_device; err_dev_remv: rndis_filter_device_remove(dev, net_device); return ERR_PTR(ret); } void rndis_filter_device_remove(struct hv_device *dev, struct netvsc_device *net_dev) { struct rndis_device *rndis_dev = net_dev->extension; /* Halt and release the rndis device */ rndis_filter_halt_device(rndis_dev); net_dev->extension = NULL; netvsc_device_remove(dev); } int rndis_filter_open(struct netvsc_device *nvdev) { if (!nvdev) return -EINVAL; return 
rndis_filter_open_device(nvdev->extension); } int rndis_filter_close(struct netvsc_device *nvdev) { if (!nvdev) return -EINVAL; return rndis_filter_close_device(nvdev->extension); }
zhiyisun/linux
drivers/net/hyperv/rndis_filter.c
C
gpl-2.0
38,059
/* * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. 
* */ #ifndef SHARE_VM_C1_C1_LIR_HPP #define SHARE_VM_C1_C1_LIR_HPP #include "c1/c1_ValueType.hpp" #include "oops/methodOop.hpp" class BlockBegin; class BlockList; class LIR_Assembler; class CodeEmitInfo; class CodeStub; class CodeStubList; class ArrayCopyStub; class LIR_Op; class ciType; class ValueType; class LIR_OpVisitState; class FpuStackSim; //--------------------------------------------------------------------- // LIR Operands // LIR_OprDesc // LIR_OprPtr // LIR_Const // LIR_Address //--------------------------------------------------------------------- class LIR_OprDesc; class LIR_OprPtr; class LIR_Const; class LIR_Address; class LIR_OprVisitor; typedef LIR_OprDesc* LIR_Opr; typedef int RegNr; define_array(LIR_OprArray, LIR_Opr) define_stack(LIR_OprList, LIR_OprArray) define_array(LIR_OprRefArray, LIR_Opr*) define_stack(LIR_OprRefList, LIR_OprRefArray) define_array(CodeEmitInfoArray, CodeEmitInfo*) define_stack(CodeEmitInfoList, CodeEmitInfoArray) define_array(LIR_OpArray, LIR_Op*) define_stack(LIR_OpList, LIR_OpArray) // define LIR_OprPtr early so LIR_OprDesc can refer to it class LIR_OprPtr: public CompilationResourceObj { public: bool is_oop_pointer() const { return (type() == T_OBJECT); } bool is_float_kind() const { BasicType t = type(); return (t == T_FLOAT) || (t == T_DOUBLE); } virtual LIR_Const* as_constant() { return NULL; } virtual LIR_Address* as_address() { return NULL; } virtual BasicType type() const = 0; virtual void print_value_on(outputStream* out) const = 0; }; // LIR constants class LIR_Const: public LIR_OprPtr { private: JavaValue _value; void type_check(BasicType t) const { assert(type() == t, "type check"); } void type_check(BasicType t1, BasicType t2) const { assert(type() == t1 || type() == t2, "type check"); } void type_check(BasicType t1, BasicType t2, BasicType t3) const { assert(type() == t1 || type() == t2 || type() == t3, "type check"); } public: LIR_Const(jint i, bool is_address=false) { 
_value.set_type(is_address?T_ADDRESS:T_INT); _value.set_jint(i); } LIR_Const(jlong l) { _value.set_type(T_LONG); _value.set_jlong(l); } LIR_Const(jfloat f) { _value.set_type(T_FLOAT); _value.set_jfloat(f); } LIR_Const(jdouble d) { _value.set_type(T_DOUBLE); _value.set_jdouble(d); } LIR_Const(jobject o) { _value.set_type(T_OBJECT); _value.set_jobject(o); } LIR_Const(void* p) { #ifdef _LP64 assert(sizeof(jlong) >= sizeof(p), "too small");; _value.set_type(T_LONG); _value.set_jlong((jlong)p); #else assert(sizeof(jint) >= sizeof(p), "too small");; _value.set_type(T_INT); _value.set_jint((jint)p); #endif } virtual BasicType type() const { return _value.get_type(); } virtual LIR_Const* as_constant() { return this; } jint as_jint() const { type_check(T_INT, T_ADDRESS); return _value.get_jint(); } jlong as_jlong() const { type_check(T_LONG ); return _value.get_jlong(); } jfloat as_jfloat() const { type_check(T_FLOAT ); return _value.get_jfloat(); } jdouble as_jdouble() const { type_check(T_DOUBLE); return _value.get_jdouble(); } jobject as_jobject() const { type_check(T_OBJECT); return _value.get_jobject(); } jint as_jint_lo() const { type_check(T_LONG ); return low(_value.get_jlong()); } jint as_jint_hi() const { type_check(T_LONG ); return high(_value.get_jlong()); } #ifdef _LP64 address as_pointer() const { type_check(T_LONG ); return (address)_value.get_jlong(); } #else address as_pointer() const { type_check(T_INT ); return (address)_value.get_jint(); } #endif jint as_jint_bits() const { type_check(T_FLOAT, T_INT, T_ADDRESS); return _value.get_jint(); } jint as_jint_lo_bits() const { if (type() == T_DOUBLE) { return low(jlong_cast(_value.get_jdouble())); } else { return as_jint_lo(); } } jint as_jint_hi_bits() const { if (type() == T_DOUBLE) { return high(jlong_cast(_value.get_jdouble())); } else { return as_jint_hi(); } } jlong as_jlong_bits() const { if (type() == T_DOUBLE) { return jlong_cast(_value.get_jdouble()); } else { return as_jlong(); } } virtual void 
print_value_on(outputStream* out) const PRODUCT_RETURN; bool is_zero_float() { jfloat f = as_jfloat(); jfloat ok = 0.0f; return jint_cast(f) == jint_cast(ok); } bool is_one_float() { jfloat f = as_jfloat(); return !g_isnan(f) && g_isfinite(f) && f == 1.0; } bool is_zero_double() { jdouble d = as_jdouble(); jdouble ok = 0.0; return jlong_cast(d) == jlong_cast(ok); } bool is_one_double() { jdouble d = as_jdouble(); return !g_isnan(d) && g_isfinite(d) && d == 1.0; } }; //---------------------LIR Operand descriptor------------------------------------ // // The class LIR_OprDesc represents a LIR instruction operand; // it can be a register (ALU/FPU), stack location or a constant; // Constants and addresses are represented as resource area allocated // structures (see above). // Registers and stack locations are inlined into the this pointer // (see value function). class LIR_OprDesc: public CompilationResourceObj { public: // value structure: // data opr-type opr-kind // +--------------+-------+-------+ // [max...........|7 6 5 4|3 2 1 0] // ^ // is_pointer bit // // lowest bit cleared, means it is a structure pointer // we need 4 bits to represent types private: friend class LIR_OprFact; // Conversion intptr_t value() const { return (intptr_t) this; } bool check_value_mask(intptr_t mask, intptr_t masked_value) const { return (value() & mask) == masked_value; } enum OprKind { pointer_value = 0 , stack_value = 1 , cpu_register = 3 , fpu_register = 5 , illegal_value = 7 }; enum OprBits { pointer_bits = 1 , kind_bits = 3 , type_bits = 4 , size_bits = 2 , destroys_bits = 1 , virtual_bits = 1 , is_xmm_bits = 1 , last_use_bits = 1 , is_fpu_stack_offset_bits = 1 // used in assertion checking on x86 for FPU stack slot allocation , non_data_bits = kind_bits + type_bits + size_bits + destroys_bits + last_use_bits + is_fpu_stack_offset_bits + virtual_bits + is_xmm_bits , data_bits = BitsPerInt - non_data_bits , reg_bits = data_bits / 2 // for two registers in one value encoding }; 
  // Bit positions computed from the widths above (kind is lowest).
  enum OprShift {
      kind_shift     = 0
    , type_shift     = kind_shift     + kind_bits
    , size_shift     = type_shift     + type_bits
    , destroys_shift = size_shift     + size_bits
    , last_use_shift = destroys_shift + destroys_bits
    , is_fpu_stack_offset_shift = last_use_shift + last_use_bits
    , virtual_shift  = is_fpu_stack_offset_shift + is_fpu_stack_offset_bits
    , is_xmm_shift   = virtual_shift + virtual_bits
    , data_shift     = is_xmm_shift + is_xmm_bits
    , reg1_shift     = data_shift
    , reg2_shift     = data_shift + reg_bits
  };

  enum OprSize {
      single_size = 0 << size_shift
    , double_size = 1 << size_shift
  };

  // Masks for extracting the individual fields out of the encoded value.
  enum OprMask {
      kind_mask      = right_n_bits(kind_bits)
    , type_mask      = right_n_bits(type_bits) << type_shift
    , size_mask      = right_n_bits(size_bits) << size_shift
    , last_use_mask  = right_n_bits(last_use_bits) << last_use_shift
    , is_fpu_stack_offset_mask = right_n_bits(is_fpu_stack_offset_bits) << is_fpu_stack_offset_shift
    , virtual_mask   = right_n_bits(virtual_bits) << virtual_shift
    , is_xmm_mask    = right_n_bits(is_xmm_bits) << is_xmm_shift
    , pointer_mask   = right_n_bits(pointer_bits)
    , lower_reg_mask = right_n_bits(reg_bits)
    , no_type_mask   = (int)(~(type_mask | last_use_mask | is_fpu_stack_offset_mask))
  };

  // Raw payload: register number(s), stack index or virtual register number.
  uintptr_t data() const                         { return value() >> data_shift; }
  // Low/high register halves for two-register (double-word) operands.
  int lo_reg_half() const                        { return data() & lower_reg_mask; }
  int hi_reg_half() const                        { return (data() >> reg_bits) & lower_reg_mask; }
  OprKind kind_field() const                     { return (OprKind)(value() & kind_mask); }
  OprSize size_field() const                     { return (OprSize)(value() & size_mask); }

  static char type_char(BasicType t);

 public:
  enum {
    vreg_base = ConcreteRegisterImpl::number_of_registers,  // first virtual register number
    vreg_max = (1 << data_bits) - 1                         // largest encodable vreg number
  };

  static inline LIR_Opr illegalOpr();

  // Operand type, stored already shifted into its final bit position.
  enum OprType {
      unknown_type  = 0 << type_shift    // means: not set (catch uninitialized types)
    , int_type      = 1 << type_shift
    , long_type     = 2 << type_shift
    , object_type   = 3 << type_shift
    , address_type  = 4 << type_shift
    , float_type    = 5 << type_shift
    , double_type   = 6 << type_shift
  };
  friend OprType as_OprType(BasicType t);
  friend BasicType as_BasicType(OprType t);

  // Type field; only meaningful for register and stack operands
  // (pointer operands carry their type in the out-of-line structure).
  OprType type_field_valid() const               { assert(is_register() || is_stack(), "should not be called otherwise"); return (OprType)(value() & type_mask); }
  OprType type_field() const                     { return is_illegal() ? unknown_type : (OprType)(value() & type_mask); }

  // Maps a BasicType to the operand size (double-word for long/double).
  static OprSize size_for(BasicType t) {
    switch (t) {
      case T_LONG:
      case T_DOUBLE:
        return double_size;
        break;

      case T_FLOAT:
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
      case T_ADDRESS:
      case T_OBJECT:
      case T_ARRAY:
        return single_size;
        break;

      default:
        ShouldNotReachHere();
        return single_size;
    }
  }

  void validate_type() const PRODUCT_RETURN;

  BasicType type() const {
    if (is_pointer()) {
      return pointer()->type();
    }
    return as_BasicType(type_field());
  }

  ValueType* value_type() const                  { return as_ValueType(type()); }

  char type_char() const                         { return type_char((is_pointer()) ? pointer()->type() : type()); }

  // Identity comparison; operands are encoded values, so pointer
  // equality suffices.
  bool is_equal(LIR_Opr opr) const               { return this == opr; }
  // checks whether types are same
  bool is_same_type(LIR_Opr opr) const           {
    assert(type_field() != unknown_type &&
           opr->type_field() != unknown_type, "shouldn't see unknown_type");
    return type_field() == opr->type_field();
  }
  // Same physical register, ignoring type/last-use/fpu-stack-offset bits.
  bool is_same_register(LIR_Opr opr) {
    return (is_register() && opr->is_register() &&
            kind_field() == opr->kind_field() &&
            (value() & no_type_mask) == (opr->value() & no_type_mask));
  }

  bool is_pointer() const                        { return check_value_mask(pointer_mask, pointer_value); }
  bool is_illegal() const                        { return kind_field() == illegal_value; }
  bool is_valid() const                          { return kind_field() != illegal_value; }

  bool is_register() const                       { return is_cpu_register() || is_fpu_register(); }
  bool is_virtual() const                        { return is_virtual_cpu()  || is_virtual_fpu();  }

  bool is_constant() const                       { return is_pointer() && pointer()->as_constant() != NULL; }
  bool is_address() const                        { return is_pointer() && pointer()->as_address() != NULL; }

  bool is_float_kind() const                     { return is_pointer() ? pointer()->is_float_kind() : (kind_field() == fpu_register); }
  bool is_oop() const;

  // semantic for fpu- and xmm-registers:
  // * is_float and is_double return true for xmm_registers
  //   (so is_single_fpu and is_single_xmm are true)
  // * So you must always check for is_???_xmm prior to is_???_fpu to
  //   distinguish between fpu- and xmm-registers

  bool is_stack() const        { validate_type(); return check_value_mask(kind_mask,                stack_value);                 }
  bool is_single_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value | single_size);   }
  bool is_double_stack() const { validate_type(); return check_value_mask(kind_mask | size_mask,    stack_value | double_size);   }

  bool is_cpu_register() const { validate_type(); return check_value_mask(kind_mask,                cpu_register);                }
  bool is_virtual_cpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register | virtual_mask); }
  bool is_fixed_cpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, cpu_register);                }
  bool is_single_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | single_size);  }
  bool is_double_cpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    cpu_register | double_size);  }

  bool is_fpu_register() const { validate_type(); return check_value_mask(kind_mask,                fpu_register);                }
  bool is_virtual_fpu() const  { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register | virtual_mask); }
  bool is_fixed_fpu() const    { validate_type(); return check_value_mask(kind_mask | virtual_mask, fpu_register);                }
  bool is_single_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | single_size);  }
  bool is_double_fpu() const   { validate_type(); return check_value_mask(kind_mask | size_mask,    fpu_register | double_size);  }

  bool is_xmm_register() const { validate_type(); return check_value_mask(kind_mask | is_xmm_mask,  fpu_register | is_xmm_mask);  }
  bool
is_single_xmm() const  { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | single_size | is_xmm_mask); }
  bool is_double_xmm() const   { validate_type(); return check_value_mask(kind_mask | size_mask | is_xmm_mask, fpu_register | double_size | is_xmm_mask); }

  // fast accessor functions for special bits that do not work for pointers
  // (in this functions, the check for is_pointer() is omitted)
  bool is_single_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, single_size); }
  bool is_double_word() const      { assert(is_register() || is_stack(), "type check"); return check_value_mask(size_mask, double_size); }
  bool is_virtual_register() const { assert(is_register(), "type check"); return check_value_mask(virtual_mask, virtual_mask); }
  bool is_oop_register() const     { assert(is_register() || is_stack(), "type check"); return type_field_valid() == object_type; }
  BasicType type_register() const  { assert(is_register() || is_stack(), "type check"); return as_BasicType(type_field_valid());  }

  // Register-allocator bookkeeping bits; make_* return a new encoded
  // operand value with the corresponding bit set.
  bool is_last_use() const         { assert(is_register(), "only works for registers"); return (value() & last_use_mask) != 0; }
  bool is_fpu_stack_offset() const { assert(is_register(), "only works for registers"); return (value() & is_fpu_stack_offset_mask) != 0; }
  LIR_Opr make_last_use()          { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | last_use_mask); }
  LIR_Opr make_fpu_stack_offset()  { assert(is_register(), "only works for registers"); return (LIR_Opr)(value() | is_fpu_stack_offset_mask); }

  // Stack-slot and register-number accessors; each asserts the exact
  // operand flavor it is valid for.
  int single_stack_ix() const      { assert(is_single_stack() && !is_virtual(), "type check"); return (int)data(); }
  int double_stack_ix() const      { assert(is_double_stack() && !is_virtual(), "type check"); return (int)data(); }
  RegNr cpu_regnr() const          { assert(is_single_cpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
  RegNr cpu_regnrLo() const        { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
  RegNr cpu_regnrHi() const        { assert(is_double_cpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
  RegNr fpu_regnr() const          { assert(is_single_fpu()   && !is_virtual(), "type check"); return (RegNr)data(); }
  RegNr fpu_regnrLo() const        { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
  RegNr fpu_regnrHi() const        { assert(is_double_fpu()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
  RegNr xmm_regnr() const          { assert(is_single_xmm()   && !is_virtual(), "type check"); return (RegNr)data(); }
  RegNr xmm_regnrLo() const        { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)lo_reg_half(); }
  RegNr xmm_regnrHi() const        { assert(is_double_xmm()   && !is_virtual(), "type check"); return (RegNr)hi_reg_half(); }
  int vreg_number() const          { assert(is_virtual(),                       "type check"); return (RegNr)data(); }

  // Out-of-line (pointer) operands: constants and addresses.
  LIR_OprPtr* pointer() const { assert(is_pointer(), "type check"); return (LIR_OprPtr*)this; }
  LIR_Const* as_constant_ptr() const { return pointer()->as_constant(); }
  LIR_Address* as_address_ptr() const { return pointer()->as_address(); }

  Register as_register()    const;
  Register as_register_lo() const;
  Register as_register_hi() const;

  Register as_pointer_register() {
#ifdef _LP64
    if (is_double_cpu()) {
      assert(as_register_lo() == as_register_hi(), "should be a single register");
      return as_register_lo();
    }
#endif
    return as_register();
  }

#ifdef X86
  XMMRegister as_xmm_float_reg() const;
  XMMRegister as_xmm_double_reg() const;
  // for compatibility with RInfo
  int fpu() const { return lo_reg_half(); }
#endif // X86
#if defined(SPARC) || defined(ARM) || defined(PPC)
  FloatRegister as_float_reg () const;
  FloatRegister as_double_reg () const;
#endif

  // Constant-operand convenience accessors (delegate to the LIR_Const).
  jint      as_jint()    const { return as_constant_ptr()->as_jint(); }
  jlong     as_jlong()   const { return as_constant_ptr()->as_jlong(); }
  jfloat    as_jfloat()  const { return as_constant_ptr()->as_jfloat(); }
  jdouble   as_jdouble() const { return
as_constant_ptr()->as_jdouble(); } jobject as_jobject() const { return as_constant_ptr()->as_jobject(); } void print() const PRODUCT_RETURN; void print(outputStream* out) const PRODUCT_RETURN; }; inline LIR_OprDesc::OprType as_OprType(BasicType type) { switch (type) { case T_INT: return LIR_OprDesc::int_type; case T_LONG: return LIR_OprDesc::long_type; case T_FLOAT: return LIR_OprDesc::float_type; case T_DOUBLE: return LIR_OprDesc::double_type; case T_OBJECT: case T_ARRAY: return LIR_OprDesc::object_type; case T_ADDRESS: return LIR_OprDesc::address_type; case T_ILLEGAL: // fall through default: ShouldNotReachHere(); return LIR_OprDesc::unknown_type; } } inline BasicType as_BasicType(LIR_OprDesc::OprType t) { switch (t) { case LIR_OprDesc::int_type: return T_INT; case LIR_OprDesc::long_type: return T_LONG; case LIR_OprDesc::float_type: return T_FLOAT; case LIR_OprDesc::double_type: return T_DOUBLE; case LIR_OprDesc::object_type: return T_OBJECT; case LIR_OprDesc::address_type: return T_ADDRESS; case LIR_OprDesc::unknown_type: // fall through default: ShouldNotReachHere(); return T_ILLEGAL; } } // LIR_Address class LIR_Address: public LIR_OprPtr { friend class LIR_OpVisitState; public: // NOTE: currently these must be the log2 of the scale factor (and // must also be equivalent to the ScaleFactor enum in // assembler_i486.hpp) enum Scale { times_1 = 0, times_2 = 1, times_4 = 2, times_8 = 3 }; private: LIR_Opr _base; LIR_Opr _index; Scale _scale; intx _disp; BasicType _type; public: LIR_Address(LIR_Opr base, LIR_Opr index, BasicType type): _base(base) , _index(index) , _scale(times_1) , _type(type) , _disp(0) { verify(); } LIR_Address(LIR_Opr base, intx disp, BasicType type): _base(base) , _index(LIR_OprDesc::illegalOpr()) , _scale(times_1) , _type(type) , _disp(disp) { verify(); } LIR_Address(LIR_Opr base, BasicType type): _base(base) , _index(LIR_OprDesc::illegalOpr()) , _scale(times_1) , _type(type) , _disp(0) { verify(); } #if defined(X86) || defined(ARM) 
LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, intx disp, BasicType type): _base(base) , _index(index) , _scale(scale) , _type(type) , _disp(disp) { verify(); } #endif // X86 || ARM LIR_Opr base() const { return _base; } LIR_Opr index() const { return _index; } Scale scale() const { return _scale; } intx disp() const { return _disp; } bool equals(LIR_Address* other) const { return base() == other->base() && index() == other->index() && disp() == other->disp() && scale() == other->scale(); } virtual LIR_Address* as_address() { return this; } virtual BasicType type() const { return _type; } virtual void print_value_on(outputStream* out) const PRODUCT_RETURN; void verify() const PRODUCT_RETURN; static Scale scale(BasicType type); }; // operand factory class LIR_OprFact: public AllStatic { public: static LIR_Opr illegalOpr; static LIR_Opr single_cpu(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::int_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); } static LIR_Opr single_cpu_oop(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::object_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); } static LIR_Opr single_cpu_address(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::address_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); } static LIR_Opr double_cpu(int reg1, int reg2) { LP64_ONLY(assert(reg1 == reg2, "must be identical")); return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::long_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); } static LIR_Opr single_fpu(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::float_type | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size); } #if defined(ARM) static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << 
LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); }
  // Soft-float: FP values live in CPU (integer) registers.
  static LIR_Opr single_softfp(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) | LIR_OprDesc::float_type  | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
  static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); }
#endif
#ifdef SPARC
  static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
                                                                             (reg2 << LIR_OprDesc::reg2_shift) |
                                                                             LIR_OprDesc::double_type          |
                                                                             LIR_OprDesc::fpu_register         |
                                                                             LIR_OprDesc::double_size); }
#endif
#ifdef X86
  // On x86 a double occupies a single FPU/XMM register, so both register
  // halves carry the same register number.
  static LIR_Opr double_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                                                                             (reg  << LIR_OprDesc::reg2_shift) |
                                                                             LIR_OprDesc::double_type          |
                                                                             LIR_OprDesc::fpu_register         |
                                                                             LIR_OprDesc::double_size); }

  static LIR_Opr single_xmm(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                                                                             LIR_OprDesc::float_type           |
                                                                             LIR_OprDesc::fpu_register         |
                                                                             LIR_OprDesc::single_size          |
                                                                             LIR_OprDesc::is_xmm_mask); }
  static LIR_Opr double_xmm(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                                                                             (reg  << LIR_OprDesc::reg2_shift) |
                                                                             LIR_OprDesc::double_type          |
                                                                             LIR_OprDesc::fpu_register         |
                                                                             LIR_OprDesc::double_size          |
                                                                             LIR_OprDesc::is_xmm_mask); }
#endif // X86

#ifdef PPC
  static LIR_Opr double_fpu(int reg)            { return (LIR_Opr)(intptr_t)((reg  << LIR_OprDesc::reg1_shift) |
                                                                             (reg  << LIR_OprDesc::reg2_shift) |
                                                                             LIR_OprDesc::double_type          |
                                                                             LIR_OprDesc::fpu_register         |
                                                                             LIR_OprDesc::double_size); }
  static LIR_Opr single_softfp(int reg)            { return (LIR_Opr)((reg  << LIR_OprDesc::reg1_shift) | LIR_OprDesc::float_type  | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
  // NOTE(review): reg2 is placed in the reg1 slot here (halves swapped
  // relative to the ARM variant) -- presumably intentional for PPC word
  // ordering; confirm before touching.
  static LIR_Opr double_softfp(int reg1, int reg2) { return (LIR_Opr)((reg2 << LIR_OprDesc::reg1_shift) | (reg1 <<
LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); }
#endif // PPC

  // Creates a virtual-register operand of the given type.  The payload is
  // the virtual register number; float/double use FPU registers except on
  // soft-float builds, where they live in CPU registers.
  static LIR_Opr virtual_register(int index, BasicType type) {
    LIR_Opr res;
    switch (type) {
      case T_OBJECT: // fall through
      case T_ARRAY:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::object_type           |
                                  LIR_OprDesc::cpu_register          |
                                  LIR_OprDesc::single_size           |
                                  LIR_OprDesc::virtual_mask);
        break;

      case T_INT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::int_type              |
                                  LIR_OprDesc::cpu_register          |
                                  LIR_OprDesc::single_size           |
                                  LIR_OprDesc::virtual_mask);
        break;

      case T_ADDRESS:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::address_type          |
                                  LIR_OprDesc::cpu_register          |
                                  LIR_OprDesc::single_size           |
                                  LIR_OprDesc::virtual_mask);
        break;

      case T_LONG:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::long_type             |
                                  LIR_OprDesc::cpu_register          |
                                  LIR_OprDesc::double_size           |
                                  LIR_OprDesc::virtual_mask);
        break;

#ifdef __SOFTFP__
      case T_FLOAT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::float_type            |
                                  LIR_OprDesc::cpu_register          |
                                  LIR_OprDesc::single_size           |
                                  LIR_OprDesc::virtual_mask);
        break;
      case T_DOUBLE:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::double_type           |
                                  LIR_OprDesc::cpu_register          |
                                  LIR_OprDesc::double_size           |
                                  LIR_OprDesc::virtual_mask);
        break;
#else // __SOFTFP__
      case T_FLOAT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::float_type            |
                                  LIR_OprDesc::fpu_register          |
                                  LIR_OprDesc::single_size           |
                                  LIR_OprDesc::virtual_mask);
        break;

      case T_DOUBLE:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::double_type           |
                                  LIR_OprDesc::fpu_register          |
                                  LIR_OprDesc::double_size           |
                                  LIR_OprDesc::virtual_mask);
        break;
#endif // __SOFTFP__
      default:       ShouldNotReachHere(); res = illegalOpr;
    }

#ifdef ASSERT
    res->validate_type();
    assert(res->vreg_number() == index, "conversion check");
    assert(index >= LIR_OprDesc::vreg_base, "must start at vreg_base");
    assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");

    // old-style calculation; check if old and new method are equal
    LIR_OprDesc::OprType t = as_OprType(type);
#ifdef __SOFTFP__
    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | t |
                                          LIR_OprDesc::cpu_register |
                                          LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
#else // __SOFTFP__
    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | t |
                                          ((type == T_FLOAT || type == T_DOUBLE) ? LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
                                          LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
    assert(res == old_res, "old and new method not equal");
#endif // __SOFTFP__
#endif // ASSERT

    return res;
  }

  // 'index' is computed by FrameMap::local_stack_pos(index); do not use other parameters as
  // the index is platform independent; a double stack using indices 2 and 3 always has
  // index 2.
  static LIR_Opr stack(int index, BasicType type) {
    LIR_Opr res;
    switch (type) {
      case T_OBJECT: // fall through
      case T_ARRAY:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::object_type           |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::single_size);
        break;

      case T_INT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::int_type              |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::single_size);
        break;

      case T_ADDRESS:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::address_type          |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::single_size);
        break;

      case T_LONG:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::long_type             |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::double_size);
        break;

      case T_FLOAT:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::float_type            |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::single_size);
        break;
      case T_DOUBLE:
        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                  LIR_OprDesc::double_type           |
                                  LIR_OprDesc::stack_value           |
                                  LIR_OprDesc::double_size);
        break;

      default:       ShouldNotReachHere(); res = illegalOpr;
    }

#ifdef ASSERT
    assert(index >= 0, "index must be positive");
    assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");

    LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                          LIR_OprDesc::stack_value           |
                                          as_OprType(type)                   |
                                          LIR_OprDesc::size_for(type));
    assert(res == old_res, "old and new method not equal");
#endif

    return res;
  }

  // Out-of-line constant operands (resource-area allocated LIR_Const).
  static LIR_Opr intConst(jint i)                { return (LIR_Opr)(new LIR_Const(i)); }
  static LIR_Opr longConst(jlong l)              { return (LIR_Opr)(new LIR_Const(l)); }
  static LIR_Opr floatConst(jfloat f)            { return (LIR_Opr)(new LIR_Const(f)); }
  static LIR_Opr doubleConst(jdouble d)          { return (LIR_Opr)(new LIR_Const(d)); }
  static LIR_Opr oopConst(jobject o)             { return (LIR_Opr)(new LIR_Const(o)); }
  static LIR_Opr address(LIR_Address* a)         { return (LIR_Opr)a; }
  static LIR_Opr intptrConst(void* p)            { return (LIR_Opr)(new LIR_Const(p)); }
  static LIR_Opr intptrConst(intptr_t v)         { return (LIR_Opr)(new LIR_Const((void*)v)); }
  static LIR_Opr illegal()                       { return (LIR_Opr)-1; }
  static LIR_Opr addressConst(jint i)            { return (LIR_Opr)(new LIR_Const(i, true)); }

  static LIR_Opr value_type(ValueType* type);
  static LIR_Opr dummy_value_type(ValueType* type);
};


//-------------------------------------------------------------------------------
//                   LIR Instructions
//-------------------------------------------------------------------------------
//
// Note:
//  - every instruction has a result operand
//  - every instruction has an CodeEmitInfo operand (can be revisited later)
//  - every instruction has a LIR_OpCode operand
//  - LIR_OpN, means an instruction that has N input operands
//
// class hierarchy:
//
class LIR_Op;
class   LIR_Op0;
class     LIR_OpLabel;
class   LIR_Op1;
class     LIR_OpBranch;
class     LIR_OpConvert;
class     LIR_OpAllocObj;
class     LIR_OpRoundFP;
class   LIR_Op2;
class     LIR_OpDelay;
class   LIR_Op3;
class
    LIR_OpAllocArray;
class     LIR_OpCall;
class       LIR_OpJavaCall;
class       LIR_OpRTCall;
class     LIR_OpArrayCopy;
class     LIR_OpLock;
class     LIR_OpTypeCheck;
class     LIR_OpCompareAndSwap;
class     LIR_OpProfileCall;


// LIR operation codes
// The begin_*/end_* markers bracket the codes belonging to each LIR_Op
// subclass (see LIR_Op::is_in_range, which tests exclusively).
enum LIR_Code {
    lir_none
  , begin_op0
      , lir_word_align
      , lir_label
      , lir_nop
      , lir_backwardbranch_target
      , lir_std_entry
      , lir_osr_entry
      , lir_build_frame
      , lir_fpop_raw
      , lir_24bit_FPU
      , lir_reset_FPU
      , lir_breakpoint
      , lir_rtcall
      , lir_membar
      , lir_membar_acquire
      , lir_membar_release
      , lir_membar_loadload
      , lir_membar_storestore
      , lir_membar_loadstore
      , lir_membar_storeload
      , lir_get_thread
  , end_op0
  , begin_op1
      , lir_fxch
      , lir_fld
      , lir_ffree
      , lir_push
      , lir_pop
      , lir_null_check
      , lir_return
      , lir_leal
      , lir_neg
      , lir_branch
      , lir_cond_float_branch
      , lir_move
      , lir_prefetchr
      , lir_prefetchw
      , lir_convert
      , lir_alloc_object
      , lir_monaddr
      , lir_roundfp
      , lir_safepoint
      , lir_pack64
      , lir_unpack64
      , lir_unwind
  , end_op1
  , begin_op2
      , lir_cmp
      , lir_cmp_l2i
      , lir_ucmp_fd2i
      , lir_cmp_fd2i
      , lir_cmove
      , lir_add
      , lir_sub
      , lir_mul
      , lir_mul_strictfp
      , lir_div
      , lir_div_strictfp
      , lir_rem
      , lir_sqrt
      , lir_abs
      , lir_sin
      , lir_cos
      , lir_tan
      , lir_log
      , lir_log10
      , lir_exp
      , lir_pow
      , lir_logic_and
      , lir_logic_or
      , lir_logic_xor
      , lir_shl
      , lir_shr
      , lir_ushr
      , lir_alloc_array
      , lir_throw
      , lir_compare_to
      , lir_xadd
      , lir_xchg
  , end_op2
  , begin_op3
      , lir_idiv
      , lir_irem
  , end_op3
  , begin_opJavaCall
      , lir_static_call
      , lir_optvirtual_call
      , lir_icvirtual_call
      , lir_virtual_call
      , lir_dynamic_call
  , end_opJavaCall
  , begin_opArrayCopy
      , lir_arraycopy
  , end_opArrayCopy
  , begin_opLock
      , lir_lock
      , lir_unlock
  , end_opLock
  , begin_delay_slot
      , lir_delay_slot
  , end_delay_slot
  , begin_opTypeCheck
      , lir_instanceof
      , lir_checkcast
      , lir_store_check
  , end_opTypeCheck
  , begin_opCompareAndSwap
      , lir_cas_long
      , lir_cas_obj
      , lir_cas_int
  , end_opCompareAndSwap
  , begin_opMDOProfile
      , lir_profile_call
  , end_opMDOProfile
};


// Conditions used by compares, conditional branches and cmove.
enum LIR_Condition {
    lir_cond_equal        ,
lir_cond_notEqual , lir_cond_less , lir_cond_lessEqual , lir_cond_greaterEqual , lir_cond_greater , lir_cond_belowEqual , lir_cond_aboveEqual , lir_cond_always , lir_cond_unknown = -1 }; enum LIR_PatchCode { lir_patch_none, lir_patch_low, lir_patch_high, lir_patch_normal }; enum LIR_MoveKind { lir_move_normal, lir_move_volatile, lir_move_unaligned, lir_move_wide, lir_move_max_flag }; // -------------------------------------------------- // LIR_Op // -------------------------------------------------- class LIR_Op: public CompilationResourceObj { friend class LIR_OpVisitState; #ifdef ASSERT private: const char * _file; int _line; #endif protected: LIR_Opr _result; unsigned short _code; unsigned short _flags; CodeEmitInfo* _info; int _id; // value id for register allocation int _fpu_pop_count; Instruction* _source; // for debugging static void print_condition(outputStream* out, LIR_Condition cond) PRODUCT_RETURN; protected: static bool is_in_range(LIR_Code test, LIR_Code start, LIR_Code end) { return start < test && test < end; } public: LIR_Op() : _result(LIR_OprFact::illegalOpr) , _code(lir_none) , _flags(0) , _info(NULL) #ifdef ASSERT , _file(NULL) , _line(0) #endif , _fpu_pop_count(0) , _source(NULL) , _id(-1) {} LIR_Op(LIR_Code code, LIR_Opr result, CodeEmitInfo* info) : _result(result) , _code(code) , _flags(0) , _info(info) #ifdef ASSERT , _file(NULL) , _line(0) #endif , _fpu_pop_count(0) , _source(NULL) , _id(-1) {} CodeEmitInfo* info() const { return _info; } LIR_Code code() const { return (LIR_Code)_code; } LIR_Opr result_opr() const { return _result; } void set_result_opr(LIR_Opr opr) { _result = opr; } #ifdef ASSERT void set_file_and_line(const char * file, int line) { _file = file; _line = line; } #endif virtual const char * name() const PRODUCT_RETURN0; int id() const { return _id; } void set_id(int id) { _id = id; } // FPU stack simulation helpers -- only used on Intel void set_fpu_pop_count(int count) { assert(count >= 0 && count <= 1, "currently only 
0 and 1 are valid"); _fpu_pop_count = count; } int fpu_pop_count() const { return _fpu_pop_count; } bool pop_fpu_stack() { return _fpu_pop_count > 0; } Instruction* source() const { return _source; } void set_source(Instruction* ins) { _source = ins; } virtual void emit_code(LIR_Assembler* masm) = 0; virtual void print_instr(outputStream* out) const = 0; virtual void print_on(outputStream* st) const PRODUCT_RETURN; virtual LIR_OpCall* as_OpCall() { return NULL; } virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; } virtual LIR_OpLabel* as_OpLabel() { return NULL; } virtual LIR_OpDelay* as_OpDelay() { return NULL; } virtual LIR_OpLock* as_OpLock() { return NULL; } virtual LIR_OpAllocArray* as_OpAllocArray() { return NULL; } virtual LIR_OpAllocObj* as_OpAllocObj() { return NULL; } virtual LIR_OpRoundFP* as_OpRoundFP() { return NULL; } virtual LIR_OpBranch* as_OpBranch() { return NULL; } virtual LIR_OpRTCall* as_OpRTCall() { return NULL; } virtual LIR_OpConvert* as_OpConvert() { return NULL; } virtual LIR_Op0* as_Op0() { return NULL; } virtual LIR_Op1* as_Op1() { return NULL; } virtual LIR_Op2* as_Op2() { return NULL; } virtual LIR_Op3* as_Op3() { return NULL; } virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; } virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; } virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; } virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; } virtual void verify() const {} }; // for calls class LIR_OpCall: public LIR_Op { friend class LIR_OpVisitState; protected: address _addr; LIR_OprList* _arguments; protected: LIR_OpCall(LIR_Code code, address addr, LIR_Opr result, LIR_OprList* arguments, CodeEmitInfo* info = NULL) : LIR_Op(code, result, info) , _arguments(arguments) , _addr(addr) {} public: address addr() const { return _addr; } const LIR_OprList* arguments() const { return _arguments; } virtual LIR_OpCall* as_OpCall() { return this; } }; // -------------------------------------------------- // 
LIR_OpJavaCall // -------------------------------------------------- class LIR_OpJavaCall: public LIR_OpCall { friend class LIR_OpVisitState; private: ciMethod* _method; LIR_Opr _receiver; LIR_Opr _method_handle_invoke_SP_save_opr; // Used in LIR_OpVisitState::visit to store the reference to FrameMap::method_handle_invoke_SP_save_opr. public: LIR_OpJavaCall(LIR_Code code, ciMethod* method, LIR_Opr receiver, LIR_Opr result, address addr, LIR_OprList* arguments, CodeEmitInfo* info) : LIR_OpCall(code, addr, result, arguments, info) , _receiver(receiver) , _method(method) , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr) { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); } LIR_OpJavaCall(LIR_Code code, ciMethod* method, LIR_Opr receiver, LIR_Opr result, intptr_t vtable_offset, LIR_OprList* arguments, CodeEmitInfo* info) : LIR_OpCall(code, (address)vtable_offset, result, arguments, info) , _receiver(receiver) , _method(method) , _method_handle_invoke_SP_save_opr(LIR_OprFact::illegalOpr) { assert(is_in_range(code, begin_opJavaCall, end_opJavaCall), "code check"); } LIR_Opr receiver() const { return _receiver; } ciMethod* method() const { return _method; } // JSR 292 support. bool is_invokedynamic() const { return code() == lir_dynamic_call; } bool is_method_handle_invoke() const { return is_invokedynamic() // An invokedynamic is always a MethodHandle call site. 
|| method()->is_compiled_lambda_form() // Java-generated adapter || method()->is_method_handle_intrinsic(); // JVM-generated MH intrinsic } intptr_t vtable_offset() const { assert(_code == lir_virtual_call, "only have vtable for real vcall"); return (intptr_t) addr(); } virtual void emit_code(LIR_Assembler* masm); virtual LIR_OpJavaCall* as_OpJavaCall() { return this; } virtual void print_instr(outputStream* out) const PRODUCT_RETURN; }; // -------------------------------------------------- // LIR_OpLabel // -------------------------------------------------- // Location where a branch can continue class LIR_OpLabel: public LIR_Op { friend class LIR_OpVisitState; private: Label* _label; public: LIR_OpLabel(Label* lbl) : LIR_Op(lir_label, LIR_OprFact::illegalOpr, NULL) , _label(lbl) {} Label* label() const { return _label; } virtual void emit_code(LIR_Assembler* masm); virtual LIR_OpLabel* as_OpLabel() { return this; } virtual void print_instr(outputStream* out) const PRODUCT_RETURN; }; // LIR_OpArrayCopy class LIR_OpArrayCopy: public LIR_Op { friend class LIR_OpVisitState; private: ArrayCopyStub* _stub; LIR_Opr _src; LIR_Opr _src_pos; LIR_Opr _dst; LIR_Opr _dst_pos; LIR_Opr _length; LIR_Opr _tmp; ciArrayKlass* _expected_type; int _flags; public: enum Flags { src_null_check = 1 << 0, dst_null_check = 1 << 1, src_pos_positive_check = 1 << 2, dst_pos_positive_check = 1 << 3, length_positive_check = 1 << 4, src_range_check = 1 << 5, dst_range_check = 1 << 6, type_check = 1 << 7, overlapping = 1 << 8, unaligned = 1 << 9, src_objarray = 1 << 10, dst_objarray = 1 << 11, all_flags = (1 << 12) - 1 }; LIR_OpArrayCopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info); LIR_Opr src() const { return _src; } LIR_Opr src_pos() const { return _src_pos; } LIR_Opr dst() const { return _dst; } LIR_Opr dst_pos() const { return _dst_pos; } LIR_Opr length() const { return _length; } LIR_Opr 
tmp() const                            { return _tmp; }
  int flags() const                              { return _flags; }
  ciArrayKlass* expected_type() const            { return _expected_type; }
  ArrayCopyStub* stub() const                    { return _stub; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpArrayCopy* as_OpArrayCopy() { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
};


// --------------------------------------------------
// LIR_Op0
// --------------------------------------------------
// An instruction with no input operands.
class LIR_Op0: public LIR_Op {
 friend class LIR_OpVisitState;

 public:
  LIR_Op0(LIR_Code code)
   : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  { assert(is_in_range(code, begin_op0, end_op0), "code check"); }
  LIR_Op0(LIR_Code code, LIR_Opr result, CodeEmitInfo* info = NULL)
   : LIR_Op(code, result, info)                   { assert(is_in_range(code, begin_op0, end_op0), "code check"); }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_Op0* as_Op0() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};


// --------------------------------------------------
// LIR_Op1
// --------------------------------------------------
// An instruction with one input operand.
class LIR_Op1: public LIR_Op {
 friend class LIR_OpVisitState;

 protected:
  LIR_Opr         _opr;   // input operand
  BasicType       _type;  // Operand types
  LIR_PatchCode   _patch; // only required with patching (NEEDS_CLEANUP: do we want a special instruction for patching?)
static void print_patch_code(outputStream* out, LIR_PatchCode code); void set_kind(LIR_MoveKind kind) { assert(code() == lir_move, "must be"); _flags = kind; } public: LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result = LIR_OprFact::illegalOpr, BasicType type = T_ILLEGAL, LIR_PatchCode patch = lir_patch_none, CodeEmitInfo* info = NULL) : LIR_Op(code, result, info) , _opr(opr) , _patch(patch) , _type(type) { assert(is_in_range(code, begin_op1, end_op1), "code check"); } LIR_Op1(LIR_Code code, LIR_Opr opr, LIR_Opr result, BasicType type, LIR_PatchCode patch, CodeEmitInfo* info, LIR_MoveKind kind) : LIR_Op(code, result, info) , _opr(opr) , _patch(patch) , _type(type) { assert(code == lir_move, "must be"); set_kind(kind); } LIR_Op1(LIR_Code code, LIR_Opr opr, CodeEmitInfo* info) : LIR_Op(code, LIR_OprFact::illegalOpr, info) , _opr(opr) , _patch(lir_patch_none) , _type(T_ILLEGAL) { assert(is_in_range(code, begin_op1, end_op1), "code check"); } LIR_Opr in_opr() const { return _opr; } LIR_PatchCode patch_code() const { return _patch; } BasicType type() const { return _type; } LIR_MoveKind move_kind() const { assert(code() == lir_move, "must be"); return (LIR_MoveKind)_flags; } virtual void emit_code(LIR_Assembler* masm); virtual LIR_Op1* as_Op1() { return this; } virtual const char * name() const PRODUCT_RETURN0; void set_in_opr(LIR_Opr opr) { _opr = opr; } virtual void print_instr(outputStream* out) const PRODUCT_RETURN; virtual void verify() const; }; // for runtime calls class LIR_OpRTCall: public LIR_OpCall { friend class LIR_OpVisitState; private: LIR_Opr _tmp; public: LIR_OpRTCall(address addr, LIR_Opr tmp, LIR_Opr result, LIR_OprList* arguments, CodeEmitInfo* info = NULL) : LIR_OpCall(lir_rtcall, addr, result, arguments, info) , _tmp(tmp) {} virtual void print_instr(outputStream* out) const PRODUCT_RETURN; virtual void emit_code(LIR_Assembler* masm); virtual LIR_OpRTCall* as_OpRTCall() { return this; } LIR_Opr tmp() const { return _tmp; } virtual void verify() 
const; }; class LIR_OpBranch: public LIR_Op { friend class LIR_OpVisitState; private: LIR_Condition _cond; BasicType _type; Label* _label; BlockBegin* _block; // if this is a branch to a block, this is the block BlockBegin* _ublock; // if this is a float-branch, this is the unorderd block CodeStub* _stub; // if this is a branch to a stub, this is the stub public: LIR_OpBranch(LIR_Condition cond, BasicType type, Label* lbl) : LIR_Op(lir_branch, LIR_OprFact::illegalOpr, (CodeEmitInfo*) NULL) , _cond(cond) , _type(type) , _label(lbl) , _block(NULL) , _ublock(NULL) , _stub(NULL) { } LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block); LIR_OpBranch(LIR_Condition cond, BasicType type, CodeStub* stub); // for unordered comparisons LIR_OpBranch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* ublock); LIR_Condition cond() const { return _cond; } BasicType type() const { return _type; } Label* label() const { return _label; } BlockBegin* block() const { return _block; } BlockBegin* ublock() const { return _ublock; } CodeStub* stub() const { return _stub; } void change_block(BlockBegin* b); void change_ublock(BlockBegin* b); void negate_cond(); virtual void emit_code(LIR_Assembler* masm); virtual LIR_OpBranch* as_OpBranch() { return this; } virtual void print_instr(outputStream* out) const PRODUCT_RETURN; }; class ConversionStub; class LIR_OpConvert: public LIR_Op1 { friend class LIR_OpVisitState; private: Bytecodes::Code _bytecode; ConversionStub* _stub; #ifdef PPC LIR_Opr _tmp1; LIR_Opr _tmp2; #endif public: LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub) : LIR_Op1(lir_convert, opr, result) , _stub(stub) #ifdef PPC , _tmp1(LIR_OprDesc::illegalOpr()) , _tmp2(LIR_OprDesc::illegalOpr()) #endif , _bytecode(code) {} #ifdef PPC LIR_OpConvert(Bytecodes::Code code, LIR_Opr opr, LIR_Opr result, ConversionStub* stub ,LIR_Opr tmp1, LIR_Opr tmp2) : LIR_Op1(lir_convert, opr, result) , _stub(stub) , _tmp1(tmp1) , 
_tmp2(tmp2) , _bytecode(code) {} #endif Bytecodes::Code bytecode() const { return _bytecode; } ConversionStub* stub() const { return _stub; } #ifdef PPC LIR_Opr tmp1() const { return _tmp1; } LIR_Opr tmp2() const { return _tmp2; } #endif virtual void emit_code(LIR_Assembler* masm); virtual LIR_OpConvert* as_OpConvert() { return this; } virtual void print_instr(outputStream* out) const PRODUCT_RETURN; static void print_bytecode(outputStream* out, Bytecodes::Code code) PRODUCT_RETURN; }; // LIR_OpAllocObj class LIR_OpAllocObj : public LIR_Op1 { friend class LIR_OpVisitState; private: LIR_Opr _tmp1; LIR_Opr _tmp2; LIR_Opr _tmp3; LIR_Opr _tmp4; int _hdr_size; int _obj_size; CodeStub* _stub; bool _init_check; public: LIR_OpAllocObj(LIR_Opr klass, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int hdr_size, int obj_size, bool init_check, CodeStub* stub) : LIR_Op1(lir_alloc_object, klass, result) , _tmp1(t1) , _tmp2(t2) , _tmp3(t3) , _tmp4(t4) , _hdr_size(hdr_size) , _obj_size(obj_size) , _init_check(init_check) , _stub(stub) { } LIR_Opr klass() const { return in_opr(); } LIR_Opr obj() const { return result_opr(); } LIR_Opr tmp1() const { return _tmp1; } LIR_Opr tmp2() const { return _tmp2; } LIR_Opr tmp3() const { return _tmp3; } LIR_Opr tmp4() const { return _tmp4; } int header_size() const { return _hdr_size; } int object_size() const { return _obj_size; } bool init_check() const { return _init_check; } CodeStub* stub() const { return _stub; } virtual void emit_code(LIR_Assembler* masm); virtual LIR_OpAllocObj * as_OpAllocObj () { return this; } virtual void print_instr(outputStream* out) const PRODUCT_RETURN; }; // LIR_OpRoundFP class LIR_OpRoundFP : public LIR_Op1 { friend class LIR_OpVisitState; private: LIR_Opr _tmp; public: LIR_OpRoundFP(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result) : LIR_Op1(lir_roundfp, reg, result) , _tmp(stack_loc_temp) {} LIR_Opr tmp() const { return _tmp; } virtual LIR_OpRoundFP* as_OpRoundFP() { return this; } void 
print_instr(outputStream* out) const PRODUCT_RETURN;
};


// LIR_OpTypeCheck
// Covers checkcast/instanceof (object form) and array store checks (array
// form); can optionally record receiver-type profiling data.
class LIR_OpTypeCheck: public LIR_Op {
 friend class LIR_OpVisitState;
 private:
  LIR_Opr       _object;               // object to be type-checked
  LIR_Opr       _array;                // destination array (store checks only)
  ciKlass*      _klass;                // klass tested against (checkcast/instanceof only)
  LIR_Opr       _tmp1;
  LIR_Opr       _tmp2;
  LIR_Opr       _tmp3;
  bool          _fast_check;           // NOTE(review): presumably a single klass compare suffices — confirm at ctor call sites
  CodeEmitInfo* _info_for_patch;
  CodeEmitInfo* _info_for_exception;
  CodeStub*     _stub;
  // methodDataOop profiling
  ciMethod*     _profiled_method;
  int           _profiled_bci;
  bool          _should_profile;

 public:
  // checkcast/instanceof form
  LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub);
  // store-check form
  LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);

  LIR_Opr object() const                         { return _object;         }
  LIR_Opr array() const                          { assert(code() == lir_store_check, "not valid"); return _array;         }
  LIR_Opr tmp1() const                           { return _tmp1;           }
  LIR_Opr tmp2() const                           { return _tmp2;           }
  LIR_Opr tmp3() const                           { return _tmp3;           }
  ciKlass* klass() const                         { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _klass;          }
  bool fast_check() const                        { assert(code() == lir_instanceof || code() == lir_checkcast, "not valid"); return _fast_check;     }
  CodeEmitInfo* info_for_patch() const           { return _info_for_patch;     }
  CodeEmitInfo* info_for_exception() const       { return _info_for_exception; }
  CodeStub* stub() const                         { return _stub;           }

  // methodDataOop profiling
  void set_profiled_method(ciMethod *method)     { _profiled_method = method; }
  void set_profiled_bci(int bci)                 { _profiled_bci = bci;       }
  void set_should_profile(bool b)                { _should_profile = b;       }
  ciMethod* profiled_method() const              { return _profiled_method;   }
  int       profiled_bci() const                 { return _profiled_bci;      }
  bool      should_profile() const               { return _should_profile;    }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
};
// LIR_Op2 class LIR_Op2: public LIR_Op { friend class LIR_OpVisitState; int _fpu_stack_size; // for sin/cos implementation on Intel protected: LIR_Opr _opr1; LIR_Opr _opr2; BasicType _type; LIR_Opr _tmp1; LIR_Opr _tmp2; LIR_Opr _tmp3; LIR_Opr _tmp4; LIR_Opr _tmp5; LIR_Condition _condition; void verify() const; public: LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, CodeEmitInfo* info = NULL) : LIR_Op(code, LIR_OprFact::illegalOpr, info) , _opr1(opr1) , _opr2(opr2) , _type(T_ILLEGAL) , _condition(condition) , _fpu_stack_size(0) , _tmp1(LIR_OprFact::illegalOpr) , _tmp2(LIR_OprFact::illegalOpr) , _tmp3(LIR_OprFact::illegalOpr) , _tmp4(LIR_OprFact::illegalOpr) , _tmp5(LIR_OprFact::illegalOpr) { assert(code == lir_cmp, "code check"); } LIR_Op2(LIR_Code code, LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) : LIR_Op(code, result, NULL) , _opr1(opr1) , _opr2(opr2) , _type(type) , _condition(condition) , _fpu_stack_size(0) , _tmp1(LIR_OprFact::illegalOpr) , _tmp2(LIR_OprFact::illegalOpr) , _tmp3(LIR_OprFact::illegalOpr) , _tmp4(LIR_OprFact::illegalOpr) , _tmp5(LIR_OprFact::illegalOpr) { assert(code == lir_cmove, "code check"); assert(type != T_ILLEGAL, "cmove should have type"); } LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result = LIR_OprFact::illegalOpr, CodeEmitInfo* info = NULL, BasicType type = T_ILLEGAL) : LIR_Op(code, result, info) , _opr1(opr1) , _opr2(opr2) , _type(type) , _condition(lir_cond_unknown) , _fpu_stack_size(0) , _tmp1(LIR_OprFact::illegalOpr) , _tmp2(LIR_OprFact::illegalOpr) , _tmp3(LIR_OprFact::illegalOpr) , _tmp4(LIR_OprFact::illegalOpr) , _tmp5(LIR_OprFact::illegalOpr) { assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check"); } LIR_Op2(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, LIR_Opr tmp1, LIR_Opr tmp2 = LIR_OprFact::illegalOpr, LIR_Opr tmp3 = LIR_OprFact::illegalOpr, LIR_Opr tmp4 = LIR_OprFact::illegalOpr, LIR_Opr tmp5 = 
LIR_OprFact::illegalOpr) : LIR_Op(code, result, NULL) , _opr1(opr1) , _opr2(opr2) , _type(T_ILLEGAL) , _condition(lir_cond_unknown) , _fpu_stack_size(0) , _tmp1(tmp1) , _tmp2(tmp2) , _tmp3(tmp3) , _tmp4(tmp4) , _tmp5(tmp5) { assert(code != lir_cmp && is_in_range(code, begin_op2, end_op2), "code check"); } LIR_Opr in_opr1() const { return _opr1; } LIR_Opr in_opr2() const { return _opr2; } BasicType type() const { return _type; } LIR_Opr tmp1_opr() const { return _tmp1; } LIR_Opr tmp2_opr() const { return _tmp2; } LIR_Opr tmp3_opr() const { return _tmp3; } LIR_Opr tmp4_opr() const { return _tmp4; } LIR_Opr tmp5_opr() const { return _tmp5; } LIR_Condition condition() const { assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); return _condition; } void set_condition(LIR_Condition condition) { assert(code() == lir_cmp || code() == lir_cmove, "only valid for cmp and cmove"); _condition = condition; } void set_fpu_stack_size(int size) { _fpu_stack_size = size; } int fpu_stack_size() const { return _fpu_stack_size; } void set_in_opr1(LIR_Opr opr) { _opr1 = opr; } void set_in_opr2(LIR_Opr opr) { _opr2 = opr; } virtual void emit_code(LIR_Assembler* masm); virtual LIR_Op2* as_Op2() { return this; } virtual void print_instr(outputStream* out) const PRODUCT_RETURN; }; class LIR_OpAllocArray : public LIR_Op { friend class LIR_OpVisitState; private: LIR_Opr _klass; LIR_Opr _len; LIR_Opr _tmp1; LIR_Opr _tmp2; LIR_Opr _tmp3; LIR_Opr _tmp4; BasicType _type; CodeStub* _stub; public: LIR_OpAllocArray(LIR_Opr klass, LIR_Opr len, LIR_Opr result, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, BasicType type, CodeStub* stub) : LIR_Op(lir_alloc_array, result, NULL) , _klass(klass) , _len(len) , _tmp1(t1) , _tmp2(t2) , _tmp3(t3) , _tmp4(t4) , _type(type) , _stub(stub) {} LIR_Opr klass() const { return _klass; } LIR_Opr len() const { return _len; } LIR_Opr obj() const { return result_opr(); } LIR_Opr tmp1() const { return _tmp1; } LIR_Opr tmp2() const { return 
_tmp2; }
  LIR_Opr   tmp3()   const                       { return _tmp3;        }
  LIR_Opr   tmp4()   const                       { return _tmp4;        }
  BasicType type()   const                       { return _type;        }
  CodeStub* stub()   const                       { return _stub;        }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpAllocArray * as_OpAllocArray () { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};


// An operation with three input operands.
class LIR_Op3: public LIR_Op {
 friend class LIR_OpVisitState;
 private:
  LIR_Opr _opr1;
  LIR_Opr _opr2;
  LIR_Opr _opr3;
 public:
  LIR_Op3(LIR_Code code, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr opr3, LIR_Opr result, CodeEmitInfo* info = NULL)
    : LIR_Op(code, result, info)
    , _opr1(opr1)
    , _opr2(opr2)
    , _opr3(opr3)                                { assert(is_in_range(code, begin_op3, end_op3), "code check"); }
  LIR_Opr in_opr1() const                        { return _opr1; }
  LIR_Opr in_opr2() const                        { return _opr2; }
  LIR_Opr in_opr3() const                        { return _opr3; }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_Op3* as_Op3() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};


//--------------------------------
// Resource-allocated wrapper around an assembler Label.
class LabelObj: public CompilationResourceObj {
 private:
  Label _label;
 public:
  LabelObj() {}
  Label* label() { return &_label; }
};


// Monitor enter/exit; _stub is the slow path taken when the inlined
// lock/unlock sequence fails.
class LIR_OpLock: public LIR_Op {
 friend class LIR_OpVisitState;
 private:
  LIR_Opr _hdr;
  LIR_Opr _obj;
  LIR_Opr _lock;
  LIR_Opr _scratch;
  CodeStub* _stub;
 public:
  LIR_OpLock(LIR_Code code, LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info)
    : LIR_Op(code, LIR_OprFact::illegalOpr, info)
    , _hdr(hdr)
    , _obj(obj)
    , _lock(lock)
    , _scratch(scratch)
    , _stub(stub)                                {}

  LIR_Opr hdr_opr() const                        { return _hdr;     }
  LIR_Opr obj_opr() const                        { return _obj;     }
  LIR_Opr lock_opr() const                       { return _lock;    }
  LIR_Opr scratch_opr() const                    { return _scratch; }
  CodeStub* stub() const                         { return _stub;    }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpLock* as_OpLock() { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
};


// Wraps an op to be emitted into a branch delay slot; see LIRFillDelaySlots.
class LIR_OpDelay: public LIR_Op {
 friend class LIR_OpVisitState;
 private:
  LIR_Op* _op;

 public:
  LIR_OpDelay(LIR_Op* op, CodeEmitInfo* info):
    LIR_Op(lir_delay_slot, LIR_OprFact::illegalOpr, info),
    _op(op) {
    assert(op->code() == lir_nop || LIRFillDelaySlots, "should be filling with nops");
  }
  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpDelay* as_OpDelay() { return this; }
  void print_instr(outputStream* out) const PRODUCT_RETURN;
  LIR_Op* delay_op() const { return _op; }
  CodeEmitInfo* call_info() const { return info(); }
};


// LIR_OpCompareAndSwap
class LIR_OpCompareAndSwap : public LIR_Op {
 friend class LIR_OpVisitState;
 private:
  LIR_Opr _addr;
  LIR_Opr _cmp_value;
  LIR_Opr _new_value;
  LIR_Opr _tmp1;
  LIR_Opr _tmp2;

 public:
  LIR_OpCompareAndSwap(LIR_Code code, LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value, LIR_Opr t1, LIR_Opr t2, LIR_Opr result)
    : LIR_Op(code, result, NULL)  // no result, no info
    , _addr(addr)
    , _cmp_value(cmp_value)
    , _new_value(new_value)
    , _tmp1(t1)
    , _tmp2(t2)                                  { }

  LIR_Opr addr()        const                    { return _addr;      }
  LIR_Opr cmp_value()   const                    { return _cmp_value; }
  LIR_Opr new_value()   const                    { return _new_value; }
  LIR_Opr tmp1()        const                    { return _tmp1;      }
  LIR_Opr tmp2()        const                    { return _tmp2;      }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpCompareAndSwap * as_OpCompareAndSwap () { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};

// LIR_OpProfileCall
class LIR_OpProfileCall : public LIR_Op {
 friend class LIR_OpVisitState;
 private:
  ciMethod* _profiled_method;
  int       _profiled_bci;
  ciMethod* _profiled_callee;
  LIR_Opr   _mdo;
  LIR_Opr   _recv;
  LIR_Opr   _tmp1;
  ciKlass*  _known_holder;

 public:
  // Destroys recv
  LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  // no result, no info
    , _profiled_method(profiled_method)
    , _profiled_bci(profiled_bci)
    , _profiled_callee(profiled_callee)
    , _mdo(mdo)
    , _recv(recv)
    , _tmp1(t1)
    ,
_known_holder(known_holder)                    { }

  ciMethod* profiled_method() const              { return _profiled_method;  }
  int       profiled_bci() const                 { return _profiled_bci;     }
  ciMethod* profiled_callee() const              { return _profiled_callee;  }
  LIR_Opr   mdo() const                          { return _mdo;              }
  LIR_Opr   recv() const                         { return _recv;             }
  LIR_Opr   tmp1() const                         { return _tmp1;             }
  ciKlass*  known_holder() const                 { return _known_holder;     }

  virtual void emit_code(LIR_Assembler* masm);
  virtual LIR_OpProfileCall* as_OpProfileCall() { return this; }
  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
};

class LIR_InsertionBuffer;

//--------------------------------LIR_List---------------------------------------------------
// Maintains a list of LIR instructions (one instance of LIR_List per basic block)
// The LIR instructions are appended by the LIR_List class itself;
//
// Notes:
// - all offsets are(should be) in bytes
// - local positions are specified with an offset, with offset 0 being local 0

class LIR_List: public CompilationResourceObj {
 private:
  LIR_OpList  _operations;
  Compilation* _compilation;
#ifndef PRODUCT
  BlockBegin* _block;
#endif
#ifdef ASSERT
  const char *  _file;
  int           _line;
#endif

  // Appends op, tagging it with the current HIR instruction as source and,
  // in debug builds, with the file/line recorded by set_file_and_line().
  void append(LIR_Op* op) {
    if (op->source() == NULL)
      op->set_source(_compilation->current_instruction());
#ifndef PRODUCT
    if (PrintIRWithLIR) {
      _compilation->maybe_print_current_instruction();
      op->print(); tty->cr();
    }
#endif // PRODUCT

    _operations.append(op);

#ifdef ASSERT
    op->verify();
    op->set_file_and_line(_file, _line);
    _file = NULL;
    _line = 0;
#endif
  }

 public:
  LIR_List(Compilation* compilation, BlockBegin* block = NULL);

#ifdef ASSERT
  void set_file_and_line(const char * file, int line);
#endif

  //---------- accessors ---------------
  LIR_OpList* instructions_list()                { return &_operations; }
  int         length() const                     { return _operations.length(); }
  LIR_Op*     at(int i) const                    { return _operations.at(i); }

  NOT_PRODUCT(BlockBegin* block() const { return _block; });

  // insert LIR_Ops in buffer to right places in LIR_List
  void
append(LIR_InsertionBuffer* buffer);

  //---------- mutators ---------------
  void insert_before(int i, LIR_List* op_list)   { _operations.insert_before(i, op_list->instructions_list()); }
  void insert_before(int i, LIR_Op* op)          { _operations.insert_before(i, op); }
  void remove_at(int i)                          { _operations.remove_at(i); }

  //---------- printing -------------
  void print_instructions() PRODUCT_RETURN;

  //---------- instructions -------------
  // Call factories: each appends a LIR_OpJavaCall with the matching dispatch kind.
  void call_opt_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
                        address dest, LIR_OprList* arguments,
                        CodeEmitInfo* info) {
    append(new LIR_OpJavaCall(lir_optvirtual_call, method, receiver, result, dest, arguments, info));
  }
  void call_static(ciMethod* method, LIR_Opr result,
                   address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
    append(new LIR_OpJavaCall(lir_static_call, method, LIR_OprFact::illegalOpr, result, dest, arguments, info));
  }
  void call_icvirtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
                      address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
    append(new LIR_OpJavaCall(lir_icvirtual_call, method, receiver, result, dest, arguments, info));
  }
  void call_virtual(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
                    intptr_t vtable_offset, LIR_OprList* arguments, CodeEmitInfo* info) {
    append(new LIR_OpJavaCall(lir_virtual_call, method, receiver, result, vtable_offset, arguments, info));
  }
  void call_dynamic(ciMethod* method, LIR_Opr receiver, LIR_Opr result,
                    address dest, LIR_OprList* arguments, CodeEmitInfo* info) {
    append(new LIR_OpJavaCall(lir_dynamic_call, method, receiver, result, dest, arguments, info));
  }

  void get_thread(LIR_Opr result)                { append(new LIR_Op0(lir_get_thread, result)); }
  void word_align()                              { append(new LIR_Op0(lir_word_align)); }
  void membar()                                  { append(new LIR_Op0(lir_membar)); }
  void membar_acquire()                          { append(new LIR_Op0(lir_membar_acquire)); }
  void membar_release()                          { append(new LIR_Op0(lir_membar_release)); }
  void membar_loadload()                         { append(new LIR_Op0(lir_membar_loadload)); }
  void membar_storestore()                       { append(new LIR_Op0(lir_membar_storestore)); }
  void membar_loadstore()                        { append(new LIR_Op0(lir_membar_loadstore)); }
  void membar_storeload()                        { append(new LIR_Op0(lir_membar_storeload)); }

  void nop()                                     { append(new LIR_Op0(lir_nop)); }
  void build_frame()                             { append(new LIR_Op0(lir_build_frame)); }

  void std_entry(LIR_Opr receiver)               { append(new LIR_Op0(lir_std_entry, receiver)); }
  void osr_entry(LIR_Opr osrPointer)             { append(new LIR_Op0(lir_osr_entry, osrPointer)); }

  void branch_destination(Label* lbl)            { append(new LIR_OpLabel(lbl)); }

  void negate(LIR_Opr from, LIR_Opr to)          { append(new LIR_Op1(lir_neg, from, to)); }
  void leal(LIR_Opr from, LIR_Opr result_reg)    { append(new LIR_Op1(lir_leal, from, result_reg)); }

  // result is a stack location for old backend and vreg for UseLinearScan
  // stack_loc_temp is an illegal register for old backend
  void roundfp(LIR_Opr reg, LIR_Opr stack_loc_temp, LIR_Opr result) { append(new LIR_OpRoundFP(reg, stack_loc_temp, result)); }
  void unaligned_move(LIR_Address* src, LIR_Opr dst) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
  void unaligned_move(LIR_Opr src, LIR_Address* dst) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), src->type(), lir_patch_none, NULL, lir_move_unaligned)); }
  void unaligned_move(LIR_Opr src, LIR_Opr dst)  { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, NULL, lir_move_unaligned)); }
  void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
  void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
  void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
  // Wide moves are only distinct from plain moves when compressed oops are on.
  void move_wide(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) {
    if (UseCompressedOops) {
      append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info, lir_move_wide));
    } else {
      move(src, dst, info);
    }
  }
  void move_wide(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) {
    if (UseCompressedOops) {
      append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info, lir_move_wide));
    } else {
      move(src, dst, info);
    }
  }
  void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }

  void oop2reg  (jobject o, LIR_Opr reg)         { append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o), reg)); }
  void oop2reg_patch(jobject o, LIR_Opr reg, CodeEmitInfo* info);

  void return_op(LIR_Opr result)                 { append(new LIR_Op1(lir_return, result)); }

  void safepoint(LIR_Opr tmp, CodeEmitInfo* info) { append(new LIR_Op1(lir_safepoint, tmp, info)); }

#ifdef PPC
  void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_OpConvert(code, left, dst, NULL, tmp1, tmp2)); }
#endif
  void convert(Bytecodes::Code code, LIR_Opr left, LIR_Opr dst, ConversionStub* stub = NULL/*, bool is_32bit = false*/) { append(new LIR_OpConvert(code, left, dst, stub)); }

  void logical_and (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_and,  left, right, dst)); }
  void logical_or  (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or,   left, right, dst)); }
  void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }

  void   pack64(LIR_Opr src, LIR_Opr dst)        { append(new LIR_Op1(lir_pack64,   src, dst, T_LONG, lir_patch_none, NULL)); }
  void unpack64(LIR_Opr src, LIR_Opr dst)        { append(new LIR_Op1(lir_unpack64, src, dst, T_LONG, lir_patch_none, NULL)); }

  void null_check(LIR_Opr opr, CodeEmitInfo*
info)   { append(new LIR_Op1(lir_null_check, opr, info)); }
  void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
    append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
  }
  void unwind_exception(LIR_Opr exceptionOop) {
    append(new LIR_Op1(lir_unwind, exceptionOop));
  }

  void compare_to (LIR_Opr left, LIR_Opr right, LIR_Opr dst) {
    append(new LIR_Op2(lir_compare_to,  left, right, dst));
  }

  void push(LIR_Opr opr)                         { append(new LIR_Op1(lir_push, opr)); }
  void pop(LIR_Opr reg)                          { append(new LIR_Op1(lir_pop,  reg)); }

  void cmp(LIR_Condition condition, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info = NULL) {
    append(new LIR_Op2(lir_cmp, condition, left, right, info));
  }
  void cmp(LIR_Condition condition, LIR_Opr left, int right, CodeEmitInfo* info = NULL) {
    cmp(condition, left, LIR_OprFact::intConst(right), info);
  }

  void cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info);
  void cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Address* addr, CodeEmitInfo* info);

  void cmove(LIR_Condition condition, LIR_Opr src1, LIR_Opr src2, LIR_Opr dst, BasicType type) {
    append(new LIR_Op2(lir_cmove, condition, src1, src2, dst, type));
  }

  void cas_long(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
                LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
  void cas_obj(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
               LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);
  void cas_int(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
               LIR_Opr t1, LIR_Opr t2, LIR_Opr result = LIR_OprFact::illegalOpr);

  // Math intrinsics; tmp operands are backend-dependent scratch values.
  void abs (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_abs , from, tmp, to)); }
  void sqrt(LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_sqrt, from, tmp, to)); }
  void log (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)                { append(new LIR_Op2(lir_log,  from, LIR_OprFact::illegalOpr, to, tmp)); }
  void log10 (LIR_Opr from, LIR_Opr to, LIR_Opr tmp)              { append(new LIR_Op2(lir_log10, from, LIR_OprFact::illegalOpr, to, tmp)); }
  void sin (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_sin , from, tmp1, to, tmp2)); }
  void cos (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_cos , from, tmp1, to, tmp2)); }
  void tan (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2) { append(new LIR_Op2(lir_tan , from, tmp1, to, tmp2)); }
  void exp (LIR_Opr from, LIR_Opr to, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, LIR_Opr tmp4, LIR_Opr tmp5)                { append(new LIR_Op2(lir_exp , from, tmp1, to, tmp2, tmp3, tmp4, tmp5)); }
  void pow (LIR_Opr arg1, LIR_Opr arg2, LIR_Opr res, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, LIR_Opr tmp4, LIR_Opr tmp5) { append(new LIR_Op2(lir_pow, arg1, arg2, res, tmp1, tmp2, tmp3, tmp4, tmp5)); }

  void add (LIR_Opr left, LIR_Opr right, LIR_Opr res)                             { append(new LIR_Op2(lir_add, left, right, res)); }
  void sub (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)  { append(new LIR_Op2(lir_sub, left, right, res, info)); }
  void mul (LIR_Opr left, LIR_Opr right, LIR_Opr res)                             { append(new LIR_Op2(lir_mul, left, right, res)); }
  void mul_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp)       { append(new LIR_Op2(lir_mul_strictfp, left, right, res, tmp)); }
  void div (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)  { append(new LIR_Op2(lir_div, left, right, res, info)); }
  void div_strictfp (LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp)       { append(new LIR_Op2(lir_div_strictfp, left, right, res, tmp)); }
  void rem (LIR_Opr left, LIR_Opr right, LIR_Opr res, CodeEmitInfo* info = NULL)  { append(new LIR_Op2(lir_rem, left, right, res, info)); }

  void volatile_load_mem_reg(LIR_Address* address, LIR_Opr dst, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
  void volatile_load_unsafe_reg(LIR_Opr base, LIR_Opr offset, LIR_Opr dst, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);

  void
load(LIR_Address* addr, LIR_Opr src, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);

  void prefetch(LIR_Address* addr, bool is_store);

  void store_mem_int(jint v,    LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
  void store_mem_oop(jobject o, LIR_Opr base, int offset_in_bytes, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
  void store(LIR_Opr src, LIR_Address* addr, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none);
  void volatile_store_mem_reg(LIR_Opr src, LIR_Address* address, CodeEmitInfo* info, LIR_PatchCode patch_code = lir_patch_none);
  void volatile_store_unsafe_reg(LIR_Opr src, LIR_Opr base, LIR_Opr offset, BasicType type, CodeEmitInfo* info, LIR_PatchCode patch_code);

  void idiv(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
  void idiv(LIR_Opr left, int     right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
  void irem(LIR_Opr left, LIR_Opr right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);
  void irem(LIR_Opr left, int     right, LIR_Opr res, LIR_Opr tmp, CodeEmitInfo* info);

  void allocate_object(LIR_Opr dst, LIR_Opr t1, LIR_Opr t2, LIR_Opr t3, LIR_Opr t4, int header_size, int object_size, LIR_Opr klass, bool init_check, CodeStub* stub);
  void allocate_array(LIR_Opr dst, LIR_Opr len, LIR_Opr t1,LIR_Opr t2, LIR_Opr t3,LIR_Opr t4, BasicType type, LIR_Opr klass, CodeStub* stub);

  // jump is an unconditional branch
  void jump(BlockBegin* block) {
    append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, block));
  }
  void jump(CodeStub* stub) {
    append(new LIR_OpBranch(lir_cond_always, T_ILLEGAL, stub));
  }
  void branch(LIR_Condition cond, BasicType type, Label* lbl)        { append(new LIR_OpBranch(cond, type, lbl)); }
  void branch(LIR_Condition cond, BasicType type, BlockBegin* block) {
    assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
    append(new LIR_OpBranch(cond, type, block));
  }
  void branch(LIR_Condition cond, BasicType type, CodeStub* stub)    {
    assert(type != T_FLOAT && type != T_DOUBLE, "no fp comparisons");
    append(new LIR_OpBranch(cond, type, stub));
  }
  // Float branch variant: 'unordered' is the target taken on unordered compares.
  void branch(LIR_Condition cond, BasicType type, BlockBegin* block, BlockBegin* unordered) {
    assert(type == T_FLOAT || type == T_DOUBLE, "fp comparisons only");
    append(new LIR_OpBranch(cond, type, block, unordered));
  }

  void shift_left(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
  void shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);
  void unsigned_shift_right(LIR_Opr value, LIR_Opr count, LIR_Opr dst, LIR_Opr tmp);

  void shift_left(LIR_Opr value, int count, LIR_Opr dst)           { shift_left(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
  void shift_right(LIR_Opr value, int count, LIR_Opr dst)          { shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }
  void unsigned_shift_right(LIR_Opr value, int count, LIR_Opr dst) { unsigned_shift_right(value, LIR_OprFact::intConst(count), dst, LIR_OprFact::illegalOpr); }

  void lcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst)          { append(new LIR_Op2(lir_cmp_l2i,  left, right, dst)); }
  void fcmp2int(LIR_Opr left, LIR_Opr right, LIR_Opr dst, bool is_unordered_less);

  void call_runtime_leaf(address routine, LIR_Opr tmp, LIR_Opr result, LIR_OprList* arguments) {
    append(new LIR_OpRTCall(routine, tmp, result, arguments));
  }

  void call_runtime(address routine, LIR_Opr tmp, LIR_Opr result,
                    LIR_OprList* arguments, CodeEmitInfo* info) {
    append(new LIR_OpRTCall(routine, tmp, result, arguments, info));
  }

  void load_stack_address_monitor(int monitor_ix, LIR_Opr dst)  { append(new LIR_Op1(lir_monaddr, LIR_OprFact::intConst(monitor_ix), dst)); }
  void unlock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub);
  void lock_object(LIR_Opr hdr, LIR_Opr obj, LIR_Opr lock, LIR_Opr scratch, CodeStub* stub, CodeEmitInfo* info);

  void set_24bit_fpu()                           { append(new LIR_Op0(lir_24bit_FPU )); }
  void restore_fpu()                             { append(new LIR_Op0(lir_reset_FPU )); }
  void breakpoint()                              { append(new LIR_Op0(lir_breakpoint)); }

  void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); }

  void fpop_raw()                                { append(new LIR_Op0(lir_fpop_raw)); }

  void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
  void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci);

  void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
                  ciMethod* profiled_method, int profiled_bci);
  // methodDataOop profiling
  void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
    append(new LIR_OpProfileCall(lir_profile_call, method, bci, callee, mdo, recv, t1, cha_klass));
  }

  void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
  void xchg(LIR_Opr src, LIR_Opr set, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xchg, src, set, res, tmp)); }
};

void print_LIR(BlockList* blocks);

class LIR_InsertionBuffer : public CompilationResourceObj {
 private:
  LIR_List*   _lir;   // the lir list where ops of this buffer should be inserted later (NULL when uninitialized)

  // list of insertion points.
// index and count are stored alternately:
  // _index_and_count[i * 2]:     the index into lir list where "count" ops should be inserted
  // _index_and_count[i * 2 + 1]: the number of ops to be inserted at index
  intStack    _index_and_count;

  // the LIR_Ops to be inserted
  LIR_OpList  _ops;

  void append_new(int index, int count)  { _index_and_count.append(index); _index_and_count.append(count); }
  void set_index_at(int i, int value)    { _index_and_count.at_put((i << 1),     value); }
  void set_count_at(int i, int value)    { _index_and_count.at_put((i << 1) + 1, value); }

#ifdef ASSERT
  void verify();
#endif
 public:
  LIR_InsertionBuffer() : _lir(NULL), _index_and_count(8), _ops(8) { }

  // must be called before using the insertion buffer
  void init(LIR_List* lir)  { assert(!initialized(), "already initialized"); _lir = lir; _index_and_count.clear(); _ops.clear(); }
  bool initialized() const  { return _lir != NULL; }
  // called automatically when the buffer is appended to the LIR_List
  void finish()             { _lir = NULL; }

  // accessors
  LIR_List*  lir_list() const             { return _lir; }
  int number_of_insertion_points() const  { return _index_and_count.length() >> 1; }
  int index_at(int i) const               { return _index_and_count.at((i << 1));     }
  int count_at(int i) const               { return _index_and_count.at((i << 1) + 1); }

  int number_of_ops() const               { return _ops.length(); }
  LIR_Op* op_at(int i) const              { return _ops.at(i); }

  // append an instruction to the buffer
  void append(int index, LIR_Op* op);

  // instruction
  void move(int index, LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(index, new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
};


//
// LIR_OpVisitState is used for manipulating LIR_Ops in an abstract way.
// Calling a LIR_Op's visit function with a LIR_OpVisitState causes
// information about the input, output and temporaries used by the
// op to be recorded.  It also records whether the op has call semantics
// and also records all the CodeEmitInfos used by this op.
//
// Stack-allocated visitor state object.  A LIR_Op's visit function fills one
// of these in with the op's input/temp/output operands, the CodeEmitInfos it
// references, and whether the op has call semantics or a slow-case path.
class LIR_OpVisitState: public StackObj {
 public:
  // Role an operand plays for the op being visited.
  typedef enum { inputMode, firstMode = inputMode, tempMode, outputMode, numModes, invalidMode = -1 } OprMode;

  enum {
    maxNumberOfOperands = 20,  // fixed per-mode capacity (asserted, never resized)
    maxNumberOfInfos = 4       // fixed capacity for CodeEmitInfos
  };

 private:
  LIR_Op* _op;                 // the op currently being visited (NULL after reset)

  // optimization: the operands and infos are not stored in a variable-length
  // list, but in a fixed-size array to save time of size checks and resizing
  int           _oprs_len[numModes];
  LIR_Opr*      _oprs_new[numModes][maxNumberOfOperands];
  int           _info_len;
  CodeEmitInfo* _info_new[maxNumberOfInfos];

  bool _has_call;              // set by do_call()
  bool _has_slow_case;         // set by do_slow_case()

  // only include register operands
  // addresses are decomposed to the base and index registers
  // constants and stack operands are ignored
  void append(LIR_Opr& opr, OprMode mode) {
    assert(opr->is_valid(), "should not call this otherwise");
    assert(mode >= 0 && mode < numModes, "bad mode");

    if (opr->is_register()) {
      assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
      _oprs_new[mode][_oprs_len[mode]++] = &opr;

    } else if (opr->is_pointer()) {
      LIR_Address* address = opr->as_address_ptr();
      if (address != NULL) {
        // special handling for addresses: add base and index register of the address
        // both are always input operands or temp if we want to extend
        // their liveness!
        if (mode == outputMode) {
          mode = inputMode;
        }
        assert (mode == inputMode || mode == tempMode, "input or temp only for addresses");
        if (address->_base->is_valid()) {
          assert(address->_base->is_register(), "must be");
          assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
          _oprs_new[mode][_oprs_len[mode]++] = &address->_base;
        }
        if (address->_index->is_valid()) {
          assert(address->_index->is_register(), "must be");
          assert(_oprs_len[mode] < maxNumberOfOperands, "array overflow");
          _oprs_new[mode][_oprs_len[mode]++] = &address->_index;
        }

      } else {
        assert(opr->is_constant(), "constant operands are not processed");
      }
    } else {
      assert(opr->is_stack(), "stack operands are not processed");
    }
  }

  // Records a CodeEmitInfo referenced by the op (capacity-checked).
  void append(CodeEmitInfo* info) {
    assert(info != NULL, "should not call this otherwise");
    assert(_info_len < maxNumberOfInfos, "array overflow");
    _info_new[_info_len++] = info;
  }

 public:
  LIR_OpVisitState() { reset(); }

  LIR_Op* op() const { return _op; }
  // Clears previously recorded state before adopting a new op.
  void set_op(LIR_Op* op) { reset(); _op = op; }

  bool has_call() const { return _has_call; }
  bool has_slow_case() const { return _has_slow_case; }

  // Clears all recorded state so the object can be reused for another op.
  void reset() {
    _op = NULL;
    _has_call = false;
    _has_slow_case = false;

    _oprs_len[inputMode] = 0;
    _oprs_len[tempMode] = 0;
    _oprs_len[outputMode] = 0;
    _info_len = 0;
  }

  int opr_count(OprMode mode) const {
    assert(mode >= 0 && mode < numModes, "bad mode");
    return _oprs_len[mode];
  }

  LIR_Opr opr_at(OprMode mode, int index) const {
    assert(mode >= 0 && mode < numModes, "bad mode");
    assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
    return *_oprs_new[mode][index];
  }

  // Rewrites the index-th recorded operand in place (used e.g. when a
  // register allocator substitutes operands).
  void set_opr_at(OprMode mode, int index, LIR_Opr opr) const {
    assert(mode >= 0 && mode < numModes, "bad mode");
    assert(index >= 0 && index < _oprs_len[mode], "index out of bound");
    *_oprs_new[mode][index] = opr;
  }

  int info_count() const {
    return _info_len;
  }

  CodeEmitInfo* info_at(int index) const {
    assert(index < _info_len, "index out of bounds");
    return _info_new[index];
  }

  XHandlers* all_xhandler();

  // collects all register operands of the instruction
  void visit(LIR_Op* op);

#if ASSERT
  // NOTE(review): the sibling class above uses '#ifdef ASSERT'; '#if ASSERT'
  // only works if the build defines ASSERT with a nonzero value -- confirm.
  // check that an operation has no operands
  bool no_operands(LIR_Op* op);
#endif

  // LIR_Op visitor functions use these to fill in the state
  void do_input(LIR_Opr& opr)     { append(opr, LIR_OpVisitState::inputMode); }
  void do_output(LIR_Opr& opr)    { append(opr, LIR_OpVisitState::outputMode); }
  void do_temp(LIR_Opr& opr)      { append(opr, LIR_OpVisitState::tempMode); }
  void do_info(CodeEmitInfo* info)            { append(info); }

  void do_stub(CodeStub* stub);
  void do_call()                  { _has_call = true; }
  void do_slow_case()             { _has_slow_case = true; }
  void do_slow_case(CodeEmitInfo* info) {
    _has_slow_case = true;
    append(info);
  }
};

// Defined out-of-line so that LIR_OprFact is fully declared at the point of use.
inline LIR_Opr LIR_OprDesc::illegalOpr() { return LIR_OprFact::illegalOpr; };

#endif // SHARE_VM_C1_C1_LIR_HPP
openjdk/jdk7u
hotspot/src/share/vm/c1/c1_LIR.hpp
C++
gpl-2.0
96,953
/* * Vulcan Build Manager * Copyright (C) 2005-2012 Chris Eldredge * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ package net.sourceforge.vulcan.web.struts.forms; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpSession; import net.sourceforge.vulcan.core.NameCollisionResolutionMode; import org.apache.commons.lang.ArrayUtils; import org.apache.commons.lang.StringUtils; import org.apache.struts.action.ActionMapping; import org.apache.struts.validator.ValidatorForm; public class ProjectImportForm extends ValidatorForm { private String url; private String nameCollisionResolutionMode; private String[] schedulerNames; private boolean createSubprojects; private boolean authenticationRequired; private String username; private String password; private String[] labels; private String newLabel; @Override public void reset(ActionMapping mapping, HttpServletRequest request) { super.reset(mapping, request); schedulerNames = ArrayUtils.EMPTY_STRING_ARRAY; labels = ArrayUtils.EMPTY_STRING_ARRAY; username = StringUtils.EMPTY; password = StringUtils.EMPTY; authenticationRequired = false; final HttpSession session = request.getSession(false); if (session != null) { session.removeAttribute("projectImportStatus"); } } public NameCollisionResolutionMode parseNameCollisionResolutionMode() { return 
NameCollisionResolutionMode.valueOf(nameCollisionResolutionMode); } public String getNameCollisionResolutionMode() { return nameCollisionResolutionMode; } public void setNameCollisionResolutionMode( String nameCollisionResolutionMode) { this.nameCollisionResolutionMode = nameCollisionResolutionMode; } public boolean isCreateSubprojects() { return createSubprojects; } public void setCreateSubprojects(boolean createSubprojects) { this.createSubprojects = createSubprojects; } public String[] getSchedulerNames() { return schedulerNames; } public void setSchedulerNames(String[] schedulerNames) { this.schedulerNames = schedulerNames; } public String getUrl() { return url; } public void setUrl(String url) { this.url = url; } public boolean isAuthenticationRequired() { return authenticationRequired; } public void setAuthenticationRequired(boolean authenticationRequired) { this.authenticationRequired = authenticationRequired; } public String getPassword() { return password; } public void setPassword(String password) { this.password = password; } public String getUsername() { return username; } public void setUsername(String username) { this.username = username; } public String[] getLabels() { return labels; } public void setLabels(String[] labels) { this.labels = labels; } public String getNewLabel() { return newLabel; } public void setNewLabel(String newLabel) { this.newLabel = newLabel; } }
chriseldredge/vulcan
vulcan-web/source/main/java/net/sourceforge/vulcan/web/struts/forms/ProjectImportForm.java
Java
gpl-2.0
3,564
/* Copyright (C) 2013-2016, The Regents of The University of Michigan. All rights reserved. This software was developed in the APRIL Robotics Lab under the direction of Edwin Olson, ebolson@umich.edu. This software may be available under alternative licensing terms; contact the address above. An unlimited license is granted to use, adapt, modify, or embed the 2D barcodes into any medium. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of the Regents of The University of Michigan. 
*/ #include <stdlib.h> #include "apriltag.h" apriltag_family_t *tag25h7_create() { apriltag_family_t *tf = (apriltag_family_t *)calloc(1, sizeof(apriltag_family_t)); #ifdef WINRT tf->name = _strdup("tag25h7"); #else tf->name = strdup("tag25h7"); #endif tf->black_border = 1; tf->d = 5; tf->h = 7; tf->ncodes = 242; tf->codes = (uint64_t *)calloc(242, sizeof(uint64_t)); tf->codes[0] = 0x00000000004b770dUL; tf->codes[1] = 0x00000000011693e6UL; tf->codes[2] = 0x0000000001a599abUL; tf->codes[3] = 0x0000000000c3a535UL; tf->codes[4] = 0x000000000152aafaUL; tf->codes[5] = 0x0000000000accd98UL; tf->codes[6] = 0x0000000001cad922UL; tf->codes[7] = 0x00000000002c2fadUL; tf->codes[8] = 0x0000000000bb3572UL; tf->codes[9] = 0x00000000014a3b37UL; tf->codes[10] = 0x000000000186524bUL; tf->codes[11] = 0x0000000000c99d4cUL; tf->codes[12] = 0x000000000023bfeaUL; tf->codes[13] = 0x000000000141cb74UL; tf->codes[14] = 0x0000000001d0d139UL; tf->codes[15] = 0x0000000001670aebUL; tf->codes[16] = 0x0000000000851675UL; tf->codes[17] = 0x000000000150334eUL; tf->codes[18] = 0x00000000006e3ed8UL; tf->codes[19] = 0x0000000000fd449dUL; tf->codes[20] = 0x0000000000aa55ecUL; tf->codes[21] = 0x0000000001c86176UL; tf->codes[22] = 0x00000000015e9b28UL; tf->codes[23] = 0x00000000007ca6b2UL; tf->codes[24] = 0x000000000147c38bUL; tf->codes[25] = 0x0000000001d6c950UL; tf->codes[26] = 0x00000000008b0e8cUL; tf->codes[27] = 0x00000000011a1451UL; tf->codes[28] = 0x0000000001562b65UL; tf->codes[29] = 0x00000000013f53c8UL; tf->codes[30] = 0x0000000000d58d7aUL; tf->codes[31] = 0x0000000000829ec9UL; tf->codes[32] = 0x0000000000faccf1UL; tf->codes[33] = 0x000000000136e405UL; tf->codes[34] = 0x00000000007a2f06UL; tf->codes[35] = 0x00000000010934cbUL; tf->codes[36] = 0x00000000016a8b56UL; tf->codes[37] = 0x0000000001a6a26aUL; tf->codes[38] = 0x0000000000f85545UL; tf->codes[39] = 0x000000000195c2e4UL; tf->codes[40] = 0x000000000024c8a9UL; tf->codes[41] = 0x00000000012bfc96UL; tf->codes[42] = 0x00000000016813aaUL; 
tf->codes[43] = 0x0000000001a42abeUL; tf->codes[44] = 0x0000000001573424UL; tf->codes[45] = 0x0000000001044573UL; tf->codes[46] = 0x0000000000b156c2UL; tf->codes[47] = 0x00000000005e6811UL; tf->codes[48] = 0x0000000001659bfeUL; tf->codes[49] = 0x0000000001d55a63UL; tf->codes[50] = 0x00000000005bf065UL; tf->codes[51] = 0x0000000000e28667UL; tf->codes[52] = 0x0000000001e9ba54UL; tf->codes[53] = 0x00000000017d7c5aUL; tf->codes[54] = 0x0000000001f5aa82UL; tf->codes[55] = 0x0000000001a2bbd1UL; tf->codes[56] = 0x00000000001ae9f9UL; tf->codes[57] = 0x0000000001259e51UL; tf->codes[58] = 0x000000000134062bUL; tf->codes[59] = 0x0000000000e1177aUL; tf->codes[60] = 0x0000000000ed07a8UL; tf->codes[61] = 0x000000000162be24UL; tf->codes[62] = 0x000000000059128bUL; tf->codes[63] = 0x0000000001663e8fUL; tf->codes[64] = 0x00000000001a83cbUL; tf->codes[65] = 0x000000000045bb59UL; tf->codes[66] = 0x000000000189065aUL; tf->codes[67] = 0x00000000004bb370UL; tf->codes[68] = 0x00000000016fb711UL; tf->codes[69] = 0x000000000122c077UL; tf->codes[70] = 0x0000000000eca17aUL; tf->codes[71] = 0x0000000000dbc1f4UL; tf->codes[72] = 0x000000000088d343UL; tf->codes[73] = 0x000000000058ac5dUL; tf->codes[74] = 0x0000000000ba02e8UL; tf->codes[75] = 0x00000000001a1d9dUL; tf->codes[76] = 0x0000000001c72eecUL; tf->codes[77] = 0x0000000000924bc5UL; tf->codes[78] = 0x0000000000dccab3UL; tf->codes[79] = 0x0000000000886d15UL; tf->codes[80] = 0x000000000178c965UL; tf->codes[81] = 0x00000000005bc69aUL; tf->codes[82] = 0x0000000001716261UL; tf->codes[83] = 0x000000000174e2ccUL; tf->codes[84] = 0x0000000001ed10f4UL; tf->codes[85] = 0x0000000000156aa8UL; tf->codes[86] = 0x00000000003e2a8aUL; tf->codes[87] = 0x00000000002752edUL; tf->codes[88] = 0x000000000153c651UL; tf->codes[89] = 0x0000000001741670UL; tf->codes[90] = 0x0000000000765b05UL; tf->codes[91] = 0x000000000119c0bbUL; tf->codes[92] = 0x000000000172a783UL; tf->codes[93] = 0x00000000004faca1UL; tf->codes[94] = 0x0000000000f31257UL; tf->codes[95] = 
0x00000000012441fcUL; tf->codes[96] = 0x00000000000d3748UL; tf->codes[97] = 0x0000000000c21f15UL; tf->codes[98] = 0x0000000000ac5037UL; tf->codes[99] = 0x000000000180e592UL; tf->codes[100] = 0x00000000007d3210UL; tf->codes[101] = 0x0000000000a27187UL; tf->codes[102] = 0x00000000002beeafUL; tf->codes[103] = 0x000000000026ff57UL; tf->codes[104] = 0x0000000000690e82UL; tf->codes[105] = 0x000000000077765cUL; tf->codes[106] = 0x0000000001a9e1d7UL; tf->codes[107] = 0x000000000140be1aUL; tf->codes[108] = 0x0000000001aa1e3aUL; tf->codes[109] = 0x0000000001944f5cUL; tf->codes[110] = 0x00000000019b5032UL; tf->codes[111] = 0x0000000000169897UL; tf->codes[112] = 0x0000000001068eb9UL; tf->codes[113] = 0x0000000000f30dbcUL; tf->codes[114] = 0x000000000106a151UL; tf->codes[115] = 0x0000000001d53e95UL; tf->codes[116] = 0x0000000001348ceeUL; tf->codes[117] = 0x0000000000cf4fcaUL; tf->codes[118] = 0x0000000001728bb5UL; tf->codes[119] = 0x0000000000dc1eecUL; tf->codes[120] = 0x000000000069e8dbUL; tf->codes[121] = 0x00000000016e1523UL; tf->codes[122] = 0x000000000105fa25UL; tf->codes[123] = 0x00000000018abb0cUL; tf->codes[124] = 0x0000000000c4275dUL; tf->codes[125] = 0x00000000006d8e76UL; tf->codes[126] = 0x0000000000e8d6dbUL; tf->codes[127] = 0x0000000000e16fd7UL; tf->codes[128] = 0x0000000001ac2682UL; tf->codes[129] = 0x000000000077435bUL; tf->codes[130] = 0x0000000000a359ddUL; tf->codes[131] = 0x00000000003a9c4eUL; tf->codes[132] = 0x000000000123919aUL; tf->codes[133] = 0x0000000001e25817UL; tf->codes[134] = 0x000000000002a836UL; tf->codes[135] = 0x00000000001545a4UL; tf->codes[136] = 0x0000000001209c8dUL; tf->codes[137] = 0x0000000000bb5f69UL; tf->codes[138] = 0x0000000001dc1f02UL; tf->codes[139] = 0x00000000005d5f7eUL; tf->codes[140] = 0x00000000012d0581UL; tf->codes[141] = 0x00000000013786c2UL; tf->codes[142] = 0x0000000000e15409UL; tf->codes[143] = 0x0000000001aa3599UL; tf->codes[144] = 0x000000000139aad8UL; tf->codes[145] = 0x0000000000b09d2aUL; tf->codes[146] = 
0x000000000054488fUL; tf->codes[147] = 0x00000000013c351cUL; tf->codes[148] = 0x0000000000976079UL; tf->codes[149] = 0x0000000000b25b12UL; tf->codes[150] = 0x0000000001addb34UL; tf->codes[151] = 0x0000000001cb23aeUL; tf->codes[152] = 0x0000000001175738UL; tf->codes[153] = 0x0000000001303bb8UL; tf->codes[154] = 0x0000000000d47716UL; tf->codes[155] = 0x000000000188ceeaUL; tf->codes[156] = 0x0000000000baf967UL; tf->codes[157] = 0x0000000001226d39UL; tf->codes[158] = 0x000000000135e99bUL; tf->codes[159] = 0x000000000034adc5UL; tf->codes[160] = 0x00000000002e384dUL; tf->codes[161] = 0x000000000090d3faUL; tf->codes[162] = 0x0000000000232713UL; tf->codes[163] = 0x00000000017d49b1UL; tf->codes[164] = 0x0000000000aa84d6UL; tf->codes[165] = 0x0000000000c2ddf8UL; tf->codes[166] = 0x0000000001665646UL; tf->codes[167] = 0x00000000004f345fUL; tf->codes[168] = 0x00000000002276b1UL; tf->codes[169] = 0x0000000001255dd7UL; tf->codes[170] = 0x00000000016f4cccUL; tf->codes[171] = 0x00000000004aaffcUL; tf->codes[172] = 0x0000000000c46da6UL; tf->codes[173] = 0x000000000085c7b3UL; tf->codes[174] = 0x0000000001311fcbUL; tf->codes[175] = 0x00000000009c6c4fUL; tf->codes[176] = 0x000000000187d947UL; tf->codes[177] = 0x00000000008578e4UL; tf->codes[178] = 0x0000000000e2bf0bUL; tf->codes[179] = 0x0000000000a01b4cUL; tf->codes[180] = 0x0000000000a1493bUL; tf->codes[181] = 0x00000000007ad766UL; tf->codes[182] = 0x0000000000ccfe82UL; tf->codes[183] = 0x0000000001981b5bUL; tf->codes[184] = 0x0000000001cacc85UL; tf->codes[185] = 0x0000000000562cdbUL; tf->codes[186] = 0x00000000015b0e78UL; tf->codes[187] = 0x00000000008f66c5UL; tf->codes[188] = 0x00000000003332bfUL; tf->codes[189] = 0x00000000012ce754UL; tf->codes[190] = 0x0000000000096a76UL; tf->codes[191] = 0x0000000001d5e3baUL; tf->codes[192] = 0x000000000027ea41UL; tf->codes[193] = 0x00000000014412dfUL; tf->codes[194] = 0x000000000067b9b4UL; tf->codes[195] = 0x0000000000daa51aUL; tf->codes[196] = 0x00000000001dcb17UL; tf->codes[197] = 
0x00000000004d4afdUL; tf->codes[198] = 0x00000000006335d5UL; tf->codes[199] = 0x0000000000ee2334UL; tf->codes[200] = 0x00000000017d4e55UL; tf->codes[201] = 0x0000000001b8b0f0UL; tf->codes[202] = 0x00000000014999e3UL; tf->codes[203] = 0x0000000001513dfaUL; tf->codes[204] = 0x0000000000765cf2UL; tf->codes[205] = 0x000000000056af90UL; tf->codes[206] = 0x00000000012e16acUL; tf->codes[207] = 0x0000000001d3d86cUL; tf->codes[208] = 0x0000000000ff279bUL; tf->codes[209] = 0x00000000018822ddUL; tf->codes[210] = 0x000000000099d478UL; tf->codes[211] = 0x00000000008dc0d2UL; tf->codes[212] = 0x000000000034b666UL; tf->codes[213] = 0x0000000000cf9526UL; tf->codes[214] = 0x000000000186443dUL; tf->codes[215] = 0x00000000007a8e29UL; tf->codes[216] = 0x00000000019c6aa5UL; tf->codes[217] = 0x0000000001f2a27dUL; tf->codes[218] = 0x00000000012b2136UL; tf->codes[219] = 0x0000000000d0cd0dUL; tf->codes[220] = 0x00000000012cb320UL; tf->codes[221] = 0x00000000017ddb0bUL; tf->codes[222] = 0x000000000005353bUL; tf->codes[223] = 0x00000000015b2cafUL; tf->codes[224] = 0x0000000001e5a507UL; tf->codes[225] = 0x000000000120f1e5UL; tf->codes[226] = 0x000000000114605aUL; tf->codes[227] = 0x00000000014efe4cUL; tf->codes[228] = 0x0000000000568134UL; tf->codes[229] = 0x00000000011b9f92UL; tf->codes[230] = 0x000000000174d2a7UL; tf->codes[231] = 0x0000000000692b1dUL; tf->codes[232] = 0x000000000039e4feUL; tf->codes[233] = 0x0000000000aaff3dUL; tf->codes[234] = 0x000000000096224cUL; tf->codes[235] = 0x00000000013c9f77UL; tf->codes[236] = 0x000000000110ee8fUL; tf->codes[237] = 0x0000000000f17beaUL; tf->codes[238] = 0x000000000099fb5dUL; tf->codes[239] = 0x0000000000337141UL; tf->codes[240] = 0x000000000002b54dUL; tf->codes[241] = 0x0000000001233a70UL; return tf; } void tag25h7_destroy(apriltag_family_t *tf) { free(tf->name); free(tf->codes); free(tf); }
s-trinh/visp
3rdparty/apriltag/tag25h7.c
C
gpl-2.0
12,440
Ext.define('Portal.view.main.MainController', { extend: 'Ext.app.ViewController', alias: 'controller.main', onAddFeed: function () { var dashboard = this.lookupReference('dashboard'); dashboard.addNew('rss'); }, onAddFeedUrl: function (sender) { var dashboard = this.lookupReference('dashboard'); dashboard.addView({ type: 'rss', feedUrl: sender.feedUrl }); } });
mesocentrefc/Janua-SMS
janua-web/ext/examples/classic/portal/app/view/main/MainController.js
JavaScript
gpl-2.0
475
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en_US" xml:lang="en_US">
<head>
<title>User Manager [ User : Edit ]</title>
<link href="css/help.css" rel="stylesheet" type="text/css" />
<meta name="copyright" content="(C) 2005 Open Source Matters. All Rights Reserved." />
<meta name="license" content="http://www.gnu.org/copyleft/gpl.html GNU/GPL" />
</head>
<body>
<h1>User Manager [ User : Edit ]</h1>
<p>The local copy of this help file is no longer maintained. Please use the <a href="http://help.joomla.org/index2.php?option=com_content&amp;task=findkey&amp;pop=1&amp;keyref=screen.users.edit2">online version</a>.</p>
</body>
</html>
foresitegroup/sherwin
help/screen.users.edit2.html
HTML
gpl-2.0
752
<?php
/**
 * Implements Special:MediaStatistics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * http://www.gnu.org/copyleft/gpl.html
 *
 * @file
 * @ingroup SpecialPage
 * @author Brian Wolff
 */

/**
 * Special page listing per-MIME-type file counts and byte totals,
 * grouped into one sortable table per media type.
 *
 * @ingroup SpecialPage
 */
class MediaStatisticsPage extends QueryPage {
	// Running totals across all rows; populated by preprocessResults()
	// and used to compute the percentage columns.
	protected $totalCount = 0, $totalBytes = 0;

	function __construct( $name = 'MediaStatistics' ) {
		parent::__construct( $name );
		// Generally speaking there is only a small number of file types,
		// so just show all of them.
		$this->limit = 5000;
		$this->shownavigation = false;
	}

	function isExpensive() {
		return true;
	}

	/**
	 * Query to do.
	 *
	 * This abuses the query cache table by storing mime types as "titles".
	 *
	 * This will store entries like [[Media:BITMAP;image/jpeg;200;20000]]
	 * where the form is Media type;mime type;count;bytes.
	 *
	 * This relies on the behaviour that when value is tied, the order things
	 * come out of querycache table is the order they went in. Which is hacky.
	 * However, other special pages like Special:Deadendpages and
	 * Special:BrokenRedirects also rely on this.
	 */
	public function getQueryInfo() {
		$dbr = wfGetDB( DB_SLAVE );
		// Pack mediatype;mime;count;bytes into the single "title" column.
		$fakeTitle = $dbr->buildConcat( array(
			'img_media_type',
			$dbr->addQuotes( ';' ),
			'img_major_mime',
			$dbr->addQuotes( '/' ),
			'img_minor_mime',
			$dbr->addQuotes( ';' ),
			'COUNT(*)',
			$dbr->addQuotes( ';' ),
			'SUM( img_size )'
		) );
		return array(
			'tables' => array( 'image' ),
			'fields' => array(
				'title' => $fakeTitle,
				'namespace' => NS_MEDIA, /* needs to be something */
				'value' => '1'
			),
			'options' => array(
				'GROUP BY' => array(
					'img_media_type',
					'img_major_mime',
					'img_minor_mime',
				)
			)
		);
	}

	/**
	 * How to sort the results
	 *
	 * It's important that img_media_type come first, otherwise the
	 * tables will be fragmented.
	 * @return Array Fields to sort by
	 */
	function getOrderFields() {
		return array( 'img_media_type', 'count(*)', 'img_major_mime', 'img_minor_mime' );
	}

	/**
	 * Output the results of the query.
	 *
	 * Emits one table per media type, closing the previous table whenever
	 * the media type changes (rows arrive grouped by media type).
	 *
	 * @param $out OutputPage
	 * @param $skin Skin (deprecated presumably)
	 * @param $dbr IDatabase
	 * @param $res ResultWrapper Results from query
	 * @param $num integer Number of results
	 * @param $offset integer Paging offset (Should always be 0 in our case)
	 */
	protected function outputResults( $out, $skin, $dbr, $res, $num, $offset ) {
		$prevMediaType = null;
		foreach ( $res as $row ) {
			list( $mediaType, $mime, $totalCount, $totalBytes ) = $this->splitFakeTitle( $row->title );
			if ( $prevMediaType !== $mediaType ) {
				if ( $prevMediaType !== null ) {
					// We're not at beginning, so we have to
					// close the previous table.
					$this->outputTableEnd();
				}
				$this->outputMediaType( $mediaType );
				$this->outputTableStart( $mediaType );
				$prevMediaType = $mediaType;
			}
			$this->outputTableRow( $mime, intval( $totalCount ), intval( $totalBytes ) );
		}
		if ( $prevMediaType !== null ) {
			$this->outputTableEnd();
		}
	}

	/**
	 * Output closing </table>
	 */
	protected function outputTableEnd() {
		$this->getOutput()->addHtml( Html::closeElement( 'table' ) );
	}

	/**
	 * Output a row of the stats table
	 *
	 * @param $mime String mime type (e.g. image/jpeg)
	 * @param $count integer Number of images of this type
	 * @param $bytes integer Total space for images of this type
	 */
	protected function outputTableRow( $mime, $count, $bytes ) {
		$mimeSearch = SpecialPage::getTitleFor( 'MIMEsearch', $mime );
		$row = Html::rawElement(
			'td',
			array(),
			Linker::link( $mimeSearch, htmlspecialchars( $mime ) )
		);
		$row .= Html::element(
			'td',
			array(),
			$this->getExtensionList( $mime )
		);
		$row .= Html::rawElement(
			'td',
			// Make sure js sorts it in numeric order
			array( 'data-sort-value' => $count ),
			$this->msg( 'mediastatistics-nfiles' )
				->numParams( $count )
				/** @todo Check to be sure this really should have number formatting */
				->numParams( $this->makePercentPretty( $count / $this->totalCount ) )
				->parse()
		);
		$row .= Html::rawElement(
			'td',
			// Make sure js sorts it in numeric order
			array( 'data-sort-value' => $bytes ),
			$this->msg( 'mediastatistics-nbytes' )
				->numParams( $bytes )
				->sizeParams( $bytes )
				/** @todo Check to be sure this really should have number formatting */
				->numParams( $this->makePercentPretty( $bytes / $this->totalBytes ) )
				->parse()
		);
		$this->getOutput()->addHTML( Html::rawElement( 'tr', array(), $row ) );
	}

	/**
	 * @param float $decimal A decimal percentage (ie for 12.3%, this would be 0.123)
	 * @return String The percentage formatted so that 3 significant digits are shown.
	 */
	protected function makePercentPretty( $decimal ) {
		$decimal *= 100;
		// Always show three useful digits
		if ( $decimal == 0 ) {
			return '0';
		}
		if ( $decimal >= 100 ) {
			return '100';
		}
		$percent = sprintf( "%." . max( 0, 2 - floor( log10( $decimal ) ) ) . "f", $decimal );
		// Then remove any trailing 0's
		return preg_replace( '/\.?0*$/', '', $percent );
	}

	/**
	 * Given a mime type, return a comma separated list of allowed extensions.
	 *
	 * @param $mime String mime type
	 * @return String Comma separated list of allowed extensions (e.g. ".ogg, .oga")
	 */
	private function getExtensionList( $mime ) {
		$exts = MimeMagic::singleton()->getExtensionsForType( $mime );
		if ( $exts === null ) {
			return '';
		}
		$extArray = explode( ' ', $exts );
		$extArray = array_unique( $extArray );
		foreach ( $extArray as &$ext ) {
			$ext = '.' . $ext;
		}
		return $this->getLanguage()->commaList( $extArray );
	}

	/**
	 * Output the start of the table
	 *
	 * Including opening <table>, and first <tr> with column headers.
	 */
	protected function outputTableStart( $mediaType ) {
		$this->getOutput()->addHTML(
			Html::openElement(
				'table',
				array( 'class' => array(
					'mw-mediastats-table',
					'mw-mediastats-table-' . strtolower( $mediaType ),
					'sortable',
					'wikitable'
				))
			)
		);
		$this->getOutput()->addHTML( $this->getTableHeaderRow() );
	}

	/**
	 * Get (not output) the header row for the table
	 *
	 * @return String the header row of the able
	 */
	protected function getTableHeaderRow() {
		$headers = array( 'mimetype', 'extensions', 'count', 'totalbytes' );
		$ths = '';
		foreach ( $headers as $header ) {
			$ths .= Html::rawElement(
				'th',
				array(),
				// for grep:
				// mediastatistics-table-mimetype, mediastatistics-table-extensions
				// mediastatistics-table-count, mediastatistics-table-totalbytes
				$this->msg( 'mediastatistics-table-' . $header )->parse()
			);
		}
		return Html::rawElement( 'tr', array(), $ths );
	}

	/**
	 * Output a header for a new media type section
	 *
	 * @param $mediaType string A media type (e.g. from the MEDIATYPE_xxx constants)
	 */
	protected function outputMediaType( $mediaType ) {
		$this->getOutput()->addHTML(
			Html::element(
				'h2',
				array( 'class' => array(
					'mw-mediastats-mediatype',
					'mw-mediastats-mediatype-' . strtolower( $mediaType )
				)),
				// for grep
				// mediastatistics-header-unknown, mediastatistics-header-bitmap,
				// mediastatistics-header-drawing, mediastatistics-header-audio,
				// mediastatistics-header-video, mediastatistics-header-multimedia,
				// mediastatistics-header-office, mediastatistics-header-text,
				// mediastatistics-header-executable, mediastatistics-header-archive,
				$this->msg( 'mediastatistics-header-' . strtolower( $mediaType ) )->text()
			)
		);
		/** @todo Possibly could add a message here explaining what the different types are.
		 *  not sure if it is needed though.
		 */
	}

	/**
	 * parse the fake title format that this special page abuses querycache with.
	 *
	 * @param $fakeTitle String A string formatted as <media type>;<mime type>;<count>;<bytes>
	 * @return Array The constituent parts of $fakeTitle
	 */
	private function splitFakeTitle( $fakeTitle ) {
		return explode( ';', $fakeTitle, 4 );
	}

	/**
	 * What group to put the page in
	 * @return string
	 */
	protected function getGroupName() {
		return 'media';
	}

	/**
	 * This method isn't used, since we override outputResults, but
	 * we need to implement since abstract in parent class.
	 *
	 * @param $skin Skin
	 * @param $result stdObject Result row
	 * @return bool|string|void
	 * @throws MWException
	 */
	public function formatResult( $skin, $result ) {
		throw new MWException( "unimplemented" );
	}

	/**
	 * Initialize total values so we can figure out percentages later.
	 *
	 * @param $dbr IDatabase
	 * @param $res ResultWrapper
	 */
	public function preprocessResults( $dbr, $res ) {
		$this->totalCount = $this->totalBytes = 0;
		foreach ( $res as $row ) {
			$mediaStats = $this->splitFakeTitle( $row->title );
			$this->totalCount += isset( $mediaStats[2] ) ? $mediaStats[2] : 0;
			$this->totalBytes += isset( $mediaStats[3] ) ? $mediaStats[3] : 0;
		}
		// Rewind so outputResults() can iterate the same result set again.
		$res->seek( 0 );
	}
}
hipbr/hipbr.github.io
mediawiki-master/includes/specials/SpecialMediaStatistics.php
PHP
gpl-2.0
9,698
<?php

/**
 * Switches uplink parameters management class
 *
 * Stores, edits and renders per-switch uplink properties (media type,
 * speed and port) backed by the `switchuplinks` table.
 */
class SwitchUplinks {

    /**
     * Current instance switch ID
     *
     * @var int
     */
    protected $switchId = 0;

    /**
     * Contains current switch uplink data (single row keyed by column name)
     *
     * @var array
     */
    protected $uplinkData = array();

    /**
     * Contains available media types markers and their names
     *
     * @var array
     */
    protected $mediaTypes = array();

    /**
     * Contains available media types icons (marker => icon file name)
     *
     * @var array
     */
    protected $mediaIcons = array();

    /**
     * Contains typical uplink speed rates (marker => localized label)
     *
     * @var array
     */
    protected $speedRates = array();

    /**
     * System message helper placeholder
     *
     * @var object
     */
    protected $messages = '';

    /**
     * Switches uplink parameters DB abstraction placeholder
     *
     * @var object
     */
    protected $switchUplinks = '';

    /**
     * Contains all switches uplinks detailed data as switch=>updata
     *
     * @var array
     */
    protected $allUplinksData = array();

    /**
     * Static routes, etc
     */
    const TABLE_UPLINKS = 'switchuplinks';
    const URL_SWPROFILE = '?module=switches&edit=';
    const ROUTE_SWID = 'swuplinkswitchid';
    const ROUTE_MEDIA = 'swuplinksmedia';
    const ROUTE_SPEED = 'swuplinksspeed';
    const ROUTE_PORT = 'swuplinksport';
    const ROUTE_EDITINTERFACE = 'editswuplinkparameters';
    const PATH_ICONS = 'skins/';

    /**
     * Creates new switch uplinks object instance
     *
     * @param int|void $switchId optional switch ID; when given, that
     *                           switch's uplink row is loaded immediately
     */
    public function __construct($switchId = '') {
        $this->initMessages();
        $this->initDatabase();
        $this->setMediaTypes();
        $this->setSpeedRates();
        if (!empty($switchId)) {
            $this->setSwitchId($switchId);
            $this->loadUplinkData();
        }
    }

    /**
     * Sets available uplink media types
     *
     * @return void
     */
    protected function setMediaTypes() {
        //may be configurable in future.. or not..
        $this->mediaTypes = array(
            'F' => __('Fiber optics'),
            'C' => __('Copper'),
            'W' => __('Wireless'),
        );

        $this->mediaIcons = array(
            'F' => 'linkfiber.png',
            'C' => 'linkcopper.png',
            'W' => 'linkwireless.png',
        );
    }

    /**
     * Sets typical speed rates for uplink ports
     *
     * @return void
     */
    protected function setSpeedRates() {
        $this->speedRates = array(
            '1G' => '1 ' . __('Gbit/s'),
            '10G' => '10 ' . __('Gbit/s'),
            '40G' => '40 ' . __('Gbit/s'),
            '100M' => '100 ' . __('Mbit/s'),
            '10M' => '10 ' . __('Mbit/s'),
        );
    }

    /**
     * Inits system message helper instance for further usage
     *
     * @return void
     */
    protected function initMessages() {
        $this->messages = new UbillingMessageHelper();
    }

    /**
     * Inits database abstraction
     *
     * @return void
     */
    protected function initDatabase() {
        $this->switchUplinks = new NyanORM(self::TABLE_UPLINKS);
    }

    /**
     * Current instance switchId setter
     *
     * @param int|void $switchId
     *
     * @return void
     */
    protected function setSwitchId($switchId = '') {
        $switchId = ubRouting::filters($switchId, 'int');
        if (!empty($switchId)) {
            $this->switchId = $switchId;
        }
    }

    /**
     * Loads current switch uplink data
     *
     * @return void
     */
    protected function loadUplinkData() {
        if (!empty($this->switchId)) {
            $this->switchUplinks->where('switchid', '=', $this->switchId);
            $tmpData = $this->switchUplinks->getAll();
            if (!empty($tmpData)) {
                // only one row per switch is expected
                if (isset($tmpData[0])) {
                    $this->uplinkData = $tmpData[0];
                }
            }
        }
    }

    /**
     * Renders uplink parameters editing inputs
     *
     * @return string
     */
    public function renderEditForm() {
        $result = '';
        if (!empty($this->switchId)) {
            // prepend an "unset" option to both selectors
            $mediaTmp = array('' => '-');
            $mediaTmp += $this->mediaTypes;
            $speedTmp = array('' => '-');
            $speedTmp += $this->speedRates;
            $inputs = wf_HiddenInput(self::ROUTE_SWID, $this->switchId);
            $inputs .= wf_Selector(self::ROUTE_MEDIA, $mediaTmp, __('Type'), @$this->uplinkData['media'], false) . ' ';
            $inputs .= wf_Selector(self::ROUTE_SPEED, $speedTmp, __('Speed'), @$this->uplinkData['speed'], false) . ' ';
            $inputs .= wf_TextInput(self::ROUTE_PORT, __('Port'), @$this->uplinkData['port'], false, 2, 'digits') . ' ';
            $result .= $inputs; //we need it for main edit form integration
        } else {
            $result .= $this->messages->getStyledMessage(__('Something went wrong') . ': ' . __('Switch') . ' ID ' . __('is empty'), 'error');
        }
        return($result);
    }

    /**
     * Saves switch uplink data into database (insert or update)
     *
     * @return void
     */
    public function save() {
        if (ubRouting::checkPost(array(self::ROUTE_SWID))) {
            $switchId = ubRouting::post(self::ROUTE_SWID, 'int');
            $newMedia = ubRouting::post(self::ROUTE_MEDIA, 'mres');
            $newSpeed = ubRouting::post(self::ROUTE_SPEED, 'mres');
            $newPort = ubRouting::post(self::ROUTE_PORT, 'int');

            $this->switchUplinks->data('media', $newMedia);
            $this->switchUplinks->data('speed', $newSpeed);
            $this->switchUplinks->data('port', $newPort);

            //updating existing record
            if (!empty($this->uplinkData)) {
                $this->switchUplinks->where('switchid', '=', $switchId);
                $this->switchUplinks->save();
            } else {
                //creating new record
                $this->switchUplinks->data('switchid', $switchId);
                $this->switchUplinks->create();
            }

            log_register('SWITCHUPLINK CHANGE [' . $switchId . '] MEDIA `' . $newMedia . '` SPEED `' . $newSpeed . '` PORT `' . $newPort . '`');
        }
    }

    /**
     * Renders current instance uplink data in compact format
     *
     * @return string
     */
    public function renderSwitchUplinkData() {
        $result = '';
        if (!empty($this->uplinkData)) {
            if (!empty($this->uplinkData['media'])) {
                if (isset($this->mediaIcons[$this->uplinkData['media']])) {
                    $mediaIcon = wf_img_sized(self::PATH_ICONS . $this->mediaIcons[$this->uplinkData['media']], '', '10') . ' ';
                } else {
                    $mediaIcon = '';
                }
                $result .= $mediaIcon . $this->mediaTypes[$this->uplinkData['media']] . ' ';
            }

            if (!empty($this->uplinkData['speed'])) {
                $result .= $this->speedRates[$this->uplinkData['speed']] . ' ';
            }

            if (!empty($this->uplinkData['port'])) {
                $result .= $this->uplinkData['port'] . ' ' . __('Port');
            }

            //empty existing record
            if (!$this->uplinkData['media'] AND ! $this->uplinkData['speed'] AND ! $this->uplinkData['port']) {
                $result .= __('Uplink parameters is not set');
            }
        } else {
            $result .= __('Uplink parameters is not set');
        }
        return($result);
    }

    /**
     * Loads all switches uplinks data
     *
     * @return void
     */
    public function loadAllUplinksData() {
        $this->allUplinksData = $this->switchUplinks->getAll('switchid');
    }

    /**
     * Returns count of available uplinks data records
     *
     * @return int
     */
    public function getAllUplinksCount() {
        return(sizeof($this->allUplinksData));
    }

    /**
     * Returns short uplink parameters text description
     *
     * NOTE(review): the parameter name 'swithchId' is misspelled; left
     * as-is since renaming is code-level (harmless for positional calls,
     * but could break PHP 8 named-argument callers).
     *
     * @param int $swithchId
     *
     * @return string
     */
    public function getUplinkTinyDesc($swithchId) {
        $result = '';
        if (isset($this->allUplinksData[$swithchId])) {
            $media = $this->allUplinksData[$swithchId]['media'];
            $speed = $this->allUplinksData[$swithchId]['speed'];
            $icon = (isset($this->mediaIcons[$media])) ? wf_img(self::PATH_ICONS . $this->mediaIcons[$media], $this->mediaTypes[$media]) : '';
            $result .= $icon . $media . $speed;
        }
        return($result);
    }
}
l1ght13aby/Ubilling
api/libs/api.switchuplinks.php
PHP
gpl-2.0
8,600
#!/usr/bin/env python from runtest import TestBase class TestCase(TestBase): def __init__(self): TestBase.__init__(self, 'float-libcall', result=""" # DURATION TID FUNCTION [18276] | main() { 0.371 ms [18276] | expf(1.000000) = 2.718282; 0.118 ms [18276] | log(2.718282) = 1.000000; 3.281 ms [18276] | } /* main */ """) def build(self, name, cflags='', ldflags=''): # cygprof doesn't support arguments now if cflags.find('-finstrument-functions') >= 0: return TestBase.TEST_SKIP ldflags += " -lm" return TestBase.build(self, name, cflags, ldflags) def setup(self): self.option = '-A "expf@fparg1/32" -R "expf@retval/f32" ' self.option += '-A "log@fparg1/64" -R "log@retval/f64" '
namhyung/uftrace
tests/t198_lib_arg_float.py
Python
gpl-2.0
799
//# tLCStretch.cc: Test program for LCStretch class //# Copyright (C) 2001 //# Associated Universities, Inc. Washington DC, USA. //# //# This library is free software; you can redistribute it and/or modify it //# under the terms of the GNU Library General Public License as published by //# the Free Software Foundation; either version 2 of the License, or (at your //# option) any later version. //# //# This library is distributed in the hope that it will be useful, but WITHOUT //# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or //# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public //# License for more details. //# //# You should have received a copy of the GNU Library General Public License //# along with this library; if not, write to the Free Software Foundation, //# Inc., 675 Massachusetts Ave, Cambridge, MA 02139, USA. //# //# Correspondence concerning AIPS++ should be addressed as follows: //# Internet email: aips2-request@nrao.edu. //# Postal address: AIPS++ Project Office //# National Radio Astronomy Observatory //# 520 Edgemont Road //# Charlottesville, VA 22903-2475 USA //# //# $Id$ #include <casacore/lattices/LRegions/LCStretch.h> #include <casacore/lattices/LRegions/LCExtension.h> #include <casacore/lattices/LRegions/LCBox.h> #include <casacore/lattices/LRegions/LCPolygon.h> #include <casacore/tables/Tables/TableRecord.h> #include <casacore/casa/Arrays/Vector.h> #include <casacore/casa/Arrays/ArrayIO.h> #include <casacore/casa/Arrays/ArrayLogical.h> #include <casacore/casa/Utilities/Assert.h> #include <casacore/casa/iostream.h> #include <casacore/casa/namespace.h> void doIt (const LCRegion& region, const IPosition& axes, const IPosition& blc, const IPosition& trc, const IPosition& latticeShape) { try { LCExtension ext1 (region, axes, LCBox(IPosition(latticeShape.nelements(), 1))); LCStretch prism (ext1, axes, LCBox(blc, trc, latticeShape)); AlwaysAssertExit (prism.hasMask() == region.hasMask()); AlwaysAssertExit (! 
prism.isWritable()); Array<Bool> regmask; uInt ndimr = region.boundingBox().ndim(); uInt ndim = ndimr + latticeShape.nelements(); ((LCRegion&)region).getSlice (regmask, IPosition(ndimr,0), region.boundingBox().length(), IPosition(ndimr,1)); cout << regmask << endl; Array<Bool> mask; prism.getSlice (mask, IPosition(ndim,0), prism.boundingBox().length(), IPosition(ndim,1)); cout << mask << endl; cout << prism.hasMask() << ' ' << endl; cout << prism.boundingBox().start() << prism.boundingBox().end() << prism.boundingBox().length() << prism.latticeShape() << endl; cout << prism.stretchAxes() << prism.stretchBox().blc() << prism.stretchBox().trc() << endl; { // Test cloning. LCRegion* prismcop = prism.cloneRegion(); AlwaysAssertExit (prism.hasMask() == prismcop->hasMask()); AlwaysAssertExit (prism.boundingBox().start() == prismcop->boundingBox().start()); AlwaysAssertExit (prism.boundingBox().end() == prismcop->boundingBox().end()); AlwaysAssertExit (prism.boundingBox().stride() == prismcop->boundingBox().stride()); AlwaysAssertExit (prism.boundingBox().length() == prismcop->boundingBox().length()); Array<Bool> arr; prismcop->getSlice (arr, IPosition(ndim,0), prism.boundingBox().length(), IPosition(ndim,1)); AlwaysAssertExit (allEQ (arr, mask)); delete prismcop; } { // Test persistency. LCRegion* prismcop = LCRegion::fromRecord (prism.toRecord(""), ""); AlwaysAssertExit (prism.hasMask() == prismcop->hasMask()); AlwaysAssertExit (prism.boundingBox().start() == prismcop->boundingBox().start()); AlwaysAssertExit (prism.boundingBox().end() == prismcop->boundingBox().end()); AlwaysAssertExit (prism.boundingBox().stride() == prismcop->boundingBox().stride()); AlwaysAssertExit (prism.boundingBox().length() == prismcop->boundingBox().length()); Array<Bool> arr; prismcop->getSlice (arr, IPosition(ndim,0), prism.boundingBox().length(), IPosition(ndim,1)); AlwaysAssertExit (allEQ (arr, mask)); delete prismcop; } { // Test equality. 
LCStretch prism2(prism); AlwaysAssertExit (prism2 == prism); } { // Test unequality. LCExtension ext2 (region, axes, LCBox(IPosition(latticeShape.nelements(), 1))); LCStretch prism2 (ext2, axes, LCBox(blc-1, trc, latticeShape)); AlwaysAssertExit (prism2 != prism); } } catch (AipsError x) { cout << x.getMesg() << endl; } } void doItError (const LCRegion& region, const IPosition& axes, const IPosition& blc, const IPosition& trc, const IPosition& latticeShape) { try { LCStretch prism (region, axes, LCBox(blc, trc, latticeShape)); } catch (AipsError x) { cout << x.getMesg() << endl; } } int main() { try { // A simple box (having no mask). LCBox box (IPosition(2,1,4), IPosition(2,5,6), IPosition(2,12,14)); // A cross-like figure. Vector<Float> x(4), y(4); x(0)=3; y(0)=3; x(1)=9; y(1)=3; x(2)=3; y(2)=8; x(3)=9; y(3)=8; LCPolygon polygon(x, y, IPosition(2,12,14)); doIt (box, IPosition(1,1), IPosition(1,2), IPosition(1,3), IPosition(1,20)); doIt (polygon, IPosition(1,2), IPosition(1,2), IPosition(1,3), IPosition(1,4)); doIt (polygon, IPosition(1,1), IPosition(1,2), IPosition(1,3), IPosition(1,20)); doIt (polygon, IPosition(2,2,0), IPosition(2,0,2), IPosition(2,2,3), IPosition(2,10,20)); // Trc outside lattice, is silently adjusted doIt (polygon, IPosition(1,2), IPosition(1,2), IPosition(1,5), IPosition(1,3)); // Error; no stretchaxes doItError (polygon, IPosition(), IPosition(1,2), IPosition(1,3), IPosition(1,20)); // Error; #stretchAxes mismatches blc/trc length doItError (polygon, IPosition(2,0,1), IPosition(1,2), IPosition(1,3), IPosition(1,20)); // Error; #stretchAxes exceed nrdim doItError (polygon, IPosition(1,2), IPosition(1,2), IPosition(1,3), IPosition(1,20)); // Error; incorrect order of stretchAxes doItError (polygon, IPosition(2,1), IPosition(2,0), IPosition(2,0), IPosition(2,1)); // Error; stretched axis has not length 1 doItError (polygon, IPosition(1,0), IPosition(1,2), IPosition(1,3), IPosition(1,20)); } catch (AipsError x) { cout << "Caught exception: " << 
x.getMesg() << endl; return 1; } cout << "OK" << endl; return 0; }
indebetouw/casacore
lattices/LRegions/test/tLCStretch.cc
C++
gpl-2.0
6,525
/* * Copyright (C) 2013-2015 DeathCore <http://www.noffearrdeathproject.net/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef DEF_GATE_OF_THE_SETTING_SUN_H_ #define DEF_GATE_OF_THE_SETTING_SUN_H_ #define MAX_ENCOUNTERS 4 enum Creatures { BOSS_COMMANDER_RIMOK = 56636, BOSS_RAIGONN = 56877, BOSS_SABOTEUR = 56906, BOSS_STRIKER_GADOK = 56589, }; enum Objects { }; enum Data { DATA_SABOTEUR = 1, DATA_STRIKER_GADOK = 2, DATA_COMMANDER_RIMOK = 3, DATA_RAIGONN = 4, }; #endif
ahuraa/DeathCore_6.x.x
src/server/scripts/Pandaria/GateoftheSettingSun/gate_of_the_setting_sun.h
C
gpl-2.0
1,128
/******************************************************************************/ /* Да се најде хексален запис на декадниот број n. */ /* */ /* Пример влез: */ /* */ /* Пример излез: */ /******************************************************************************/ int main() { return 0; }
gdarko/vezbi_programiranje
zbirka_2/nizi/zad_184.c
C
gpl-2.0
656
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) * Copyright(c) 2015-2018 Intel Corporation */ #ifndef _ICP_QAT_FW_LA_H_ #define _ICP_QAT_FW_LA_H_ #include "icp_qat_fw.h" enum icp_qat_fw_la_cmd_id { ICP_QAT_FW_LA_CMD_CIPHER = 0, ICP_QAT_FW_LA_CMD_AUTH = 1, ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2, ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3, ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4, ICP_QAT_FW_LA_CMD_TRNG_TEST = 5, ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6, ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7, ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8, ICP_QAT_FW_LA_CMD_MGF1 = 9, ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10, ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11, ICP_QAT_FW_LA_CMD_DELIMITER = 12 }; #define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK #define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR #define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK #define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR struct icp_qat_fw_la_bulk_req { struct icp_qat_fw_comn_req_hdr comn_hdr; struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; struct icp_qat_fw_comn_req_mid comn_mid; struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; }; #define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1 #define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0 #define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12 #define ICP_QAT_FW_LA_ZUC_3G_PROTO 1 #define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1 #define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11 #define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1 #define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1 #define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0 #define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10 #define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1 #define ICP_QAT_FW_LA_SNOW_3G_PROTO 4 #define ICP_QAT_FW_LA_GCM_PROTO 2 #define ICP_QAT_FW_LA_CCM_PROTO 1 #define ICP_QAT_FW_LA_NO_PROTO 0 #define QAT_LA_PROTO_BITPOS 7 #define QAT_LA_PROTO_MASK 0x7 #define ICP_QAT_FW_LA_CMP_AUTH_RES 1 #define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0 #define 
QAT_LA_CMP_AUTH_RES_BITPOS 6 #define QAT_LA_CMP_AUTH_RES_MASK 0x1 #define ICP_QAT_FW_LA_RET_AUTH_RES 1 #define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0 #define QAT_LA_RET_AUTH_RES_BITPOS 5 #define QAT_LA_RET_AUTH_RES_MASK 0x1 #define ICP_QAT_FW_LA_UPDATE_STATE 1 #define ICP_QAT_FW_LA_NO_UPDATE_STATE 0 #define QAT_LA_UPDATE_STATE_BITPOS 4 #define QAT_LA_UPDATE_STATE_MASK 0x1 #define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0 #define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1 #define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3 #define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1 #define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0 #define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1 #define QAT_LA_CIPH_IV_FLD_BITPOS 2 #define QAT_LA_CIPH_IV_FLD_MASK 0x1 #define ICP_QAT_FW_LA_PARTIAL_NONE 0 #define ICP_QAT_FW_LA_PARTIAL_START 1 #define ICP_QAT_FW_LA_PARTIAL_MID 3 #define ICP_QAT_FW_LA_PARTIAL_END 2 #define QAT_LA_PARTIAL_BITPOS 0 #define QAT_LA_PARTIAL_MASK 0x3 #define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \ cmp_auth, ret_auth, update_state, \ ciph_iv, ciphcfg, partial) \ (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \ QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \ ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \ QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \ ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \ QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \ ((proto & QAT_LA_PROTO_MASK) << \ QAT_LA_PROTO_BITPOS) | \ ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \ QAT_LA_CMP_AUTH_RES_BITPOS) | \ ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \ QAT_LA_RET_AUTH_RES_BITPOS) | \ ((update_state & QAT_LA_UPDATE_STATE_MASK) << \ QAT_LA_UPDATE_STATE_BITPOS) | \ ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \ QAT_LA_CIPH_IV_FLD_BITPOS) | \ ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \ QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \ ((partial & QAT_LA_PARTIAL_MASK) << \ QAT_LA_PARTIAL_BITPOS)) #define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \ QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \ QAT_LA_CIPH_IV_FLD_MASK) #define 
ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \ QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) #define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \ QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) #define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \ QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ QAT_LA_GCM_IV_LEN_FLAG_MASK) #define ICP_QAT_FW_LA_PROTO_GET(flags) \ QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK) #define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \ QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \ QAT_LA_CMP_AUTH_RES_MASK) #define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \ QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \ QAT_LA_RET_AUTH_RES_MASK) #define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \ QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ QAT_LA_DIGEST_IN_BUFFER_MASK) #define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \ QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \ QAT_LA_UPDATE_STATE_MASK) #define ICP_QAT_FW_LA_PARTIAL_GET(flags) \ QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \ QAT_LA_PARTIAL_MASK) #define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \ QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \ QAT_LA_CIPH_IV_FLD_MASK) #define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \ QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) #define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \ QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) #define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \ QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ QAT_LA_GCM_IV_LEN_FLAG_MASK) #define ICP_QAT_FW_LA_PROTO_SET(flags, val) \ QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \ QAT_LA_PROTO_MASK) #define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \ QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \ QAT_LA_CMP_AUTH_RES_MASK) #define 
ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \ QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \ QAT_LA_RET_AUTH_RES_MASK) #define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \ QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ QAT_LA_DIGEST_IN_BUFFER_MASK) #define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \ QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \ QAT_LA_UPDATE_STATE_MASK) #define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \ QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \ QAT_LA_PARTIAL_MASK) struct icp_qat_fw_cipher_req_hdr_cd_pars { union { struct { uint64_t content_desc_addr; uint16_t content_desc_resrvd1; uint8_t content_desc_params_sz; uint8_t content_desc_hdr_resrvd2; uint32_t content_desc_resrvd3; } s; struct { uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; } s1; } u; }; struct icp_qat_fw_cipher_auth_req_hdr_cd_pars { union { struct { uint64_t content_desc_addr; uint16_t content_desc_resrvd1; uint8_t content_desc_params_sz; uint8_t content_desc_hdr_resrvd2; uint32_t content_desc_resrvd3; } s; struct { uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; } sl; } u; }; struct icp_qat_fw_cipher_cd_ctrl_hdr { uint8_t cipher_state_sz; uint8_t cipher_key_sz; uint8_t cipher_cfg_offset; uint8_t next_curr_id; uint8_t cipher_padding_sz; uint8_t resrvd1; uint16_t resrvd2; uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3]; }; struct icp_qat_fw_auth_cd_ctrl_hdr { uint32_t resrvd1; uint8_t resrvd2; uint8_t hash_flags; uint8_t hash_cfg_offset; uint8_t next_curr_id; uint8_t resrvd3; uint8_t outer_prefix_sz; uint8_t final_sz; uint8_t inner_res_sz; uint8_t resrvd4; uint8_t inner_state1_sz; uint8_t inner_state2_offset; uint8_t inner_state2_sz; uint8_t outer_config_offset; uint8_t outer_state1_sz; uint8_t outer_res_sz; uint8_t outer_prefix_offset; }; struct icp_qat_fw_cipher_auth_cd_ctrl_hdr { uint8_t cipher_state_sz; uint8_t cipher_key_sz; uint8_t cipher_cfg_offset; uint8_t next_curr_id_cipher; uint8_t cipher_padding_sz; uint8_t hash_flags; 
uint8_t hash_cfg_offset; uint8_t next_curr_id_auth; uint8_t resrvd1; uint8_t outer_prefix_sz; uint8_t final_sz; uint8_t inner_res_sz; uint8_t resrvd2; uint8_t inner_state1_sz; uint8_t inner_state2_offset; uint8_t inner_state2_sz; uint8_t outer_config_offset; uint8_t outer_state1_sz; uint8_t outer_res_sz; uint8_t outer_prefix_offset; }; #define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1 #define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0 #define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240 #define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \ (sizeof(struct icp_qat_fw_la_cipher_req_params_t)) #define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0) struct icp_qat_fw_la_cipher_req_params { uint32_t cipher_offset; uint32_t cipher_length; union { uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4]; struct { uint64_t cipher_IV_ptr; uint64_t resrvd1; } s; } u; }; struct icp_qat_fw_la_auth_req_params { uint32_t auth_off; uint32_t auth_len; union { uint64_t auth_partial_st_prefix; uint64_t aad_adr; } u1; uint64_t auth_res_addr; union { uint8_t inner_prefix_sz; uint8_t aad_sz; } u2; uint8_t resrvd1; uint8_t hash_state_sz; uint8_t auth_res_sz; } __rte_packed; struct icp_qat_fw_la_auth_req_params_resrvd_flds { uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6]; union { uint8_t inner_prefix_sz; uint8_t aad_sz; } u2; uint8_t resrvd1; uint16_t resrvd2; }; struct icp_qat_fw_la_resp { struct icp_qat_fw_comn_resp_hdr comn_resp; uint64_t opaque_data; uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; }; #define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \ ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) #define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ { (cd_ctrl_hdr_t)->next_curr_id_cipher = \ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } #define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \ (((cd_ctrl_hdr_t)->next_curr_id_cipher) \ & 
ICP_QAT_FW_COMN_CURR_ID_MASK) #define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \ { (cd_ctrl_hdr_t)->next_curr_id_cipher = \ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) } #define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \ ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) #define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ { (cd_ctrl_hdr_t)->next_curr_id_auth = \ ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } #define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \ (((cd_ctrl_hdr_t)->next_curr_id_auth) \ & ICP_QAT_FW_COMN_CURR_ID_MASK) #define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \ { (cd_ctrl_hdr_t)->next_curr_id_auth = \ ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) } #endif
emmericp/dpdk
drivers/common/qat/qat_adf/icp_qat_fw_la.h
C
gpl-2.0
11,275
using System; using Server.Targeting; namespace Server.Spells.Second { public class CunningSpell : MagerySpell { private static readonly SpellInfo m_Info = new SpellInfo( "Cunning", "Uus Wis", 212, 9061, Reagent.MandrakeRoot, Reagent.Nightshade); public CunningSpell(Mobile caster, Item scroll) : base(caster, scroll, m_Info) { } public override SpellCircle Circle { get { return SpellCircle.Second; } } public override void OnCast() { this.Caster.Target = new InternalTarget(this); } public void Target(Mobile m) { if (!this.Caster.CanSee(m)) { this.Caster.SendLocalizedMessage(500237); // Target can not be seen. } else if (this.CheckBSequence(m)) { int oldInt = SpellHelper.GetBuffOffset(m, StatType.Int); int newInt = SpellHelper.GetOffset(Caster, m, StatType.Int, false, true); if (newInt < oldInt || newInt == 0) { DoHurtFizzle(); } else { SpellHelper.Turn(this.Caster, m); SpellHelper.AddStatBonus(this.Caster, m, false, StatType.Int); int percentage = (int)(SpellHelper.GetOffsetScalar(this.Caster, m, false) * 100); TimeSpan length = SpellHelper.GetDuration(this.Caster, m); BuffInfo.AddBuff(m, new BuffInfo(BuffIcon.Cunning, 1075843, length, m, percentage.ToString())); m.FixedParticles(0x375A, 10, 15, 5011, EffectLayer.Head); m.PlaySound(0x1EB); } } this.FinishSequence(); } private class InternalTarget : Target { private readonly CunningSpell m_Owner; public InternalTarget(CunningSpell owner) : base(Core.ML ? 10 : 12, false, TargetFlags.Beneficial) { this.m_Owner = owner; } protected override void OnTarget(Mobile from, object o) { if (o is Mobile) { this.m_Owner.Target((Mobile)o); } } protected override void OnTargetFinish(Mobile from) { this.m_Owner.FinishSequence(); } } } }
Frazurbluu/ServUO
Scripts/Spells/Second/Cunning.cs
C#
gpl-2.0
2,616
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2020 Viresh Kumar <viresh.kumar@linaro.org> * * Basic fspick() failure tests. */ #include "tst_test.h" #include "lapi/fsmount.h" #define MNTPOINT "mntpoint" static struct tcase { char *name; int dirfd; const char *pathname; unsigned int flags; int exp_errno; } tcases[] = { {"invalid-fd", -1, MNTPOINT, FSPICK_NO_AUTOMOUNT | FSPICK_CLOEXEC, EBADF}, {"invalid-path", AT_FDCWD, "invalid", FSPICK_NO_AUTOMOUNT | FSPICK_CLOEXEC, ENOENT}, {"invalid-flags", AT_FDCWD, MNTPOINT, 0x10, EINVAL}, }; static void run(unsigned int n) { struct tcase *tc = &tcases[n]; TEST(fspick(tc->dirfd, tc->pathname, tc->flags)); if (TST_RET != -1) { SAFE_CLOSE(TST_RET); tst_res(TFAIL, "%s: fspick() succeeded unexpectedly (index: %d)", tc->name, n); return; } if (tc->exp_errno != TST_ERR) { tst_res(TFAIL | TTERRNO, "%s: fspick() should fail with %s", tc->name, tst_strerrno(tc->exp_errno)); return; } tst_res(TPASS | TTERRNO, "%s: fspick() failed as expected", tc->name); } static struct tst_test test = { .tcnt = ARRAY_SIZE(tcases), .test = run, .setup = fsopen_supported_by_kernel, .needs_root = 1, .mount_device = 1, .mntpoint = MNTPOINT, .all_filesystems = 1, .skip_filesystems = (const char *const []){"fuse", NULL}, };
linux-test-project/ltp
testcases/kernel/syscalls/fspick/fspick02.c
C
gpl-2.0
1,319
/* ---------------------------------------------------------------------- LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator http://lammps.sandia.gov, Sandia National Laboratories Steve Plimpton, sjplimp@sandia.gov Copyright (2003) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain rights in this software. This software is distributed under the GNU General Public License. See the README file in the top-level LAMMPS directory. ------------------------------------------------------------------------- */ #ifdef FIX_CLASS FixStyle(box/relax,FixBoxRelax) #else #ifndef LMP_FIX_BOX_RELAX_H #define LMP_FIX_BOX_RELAX_H #include "fix.h" namespace LAMMPS_NS { class FixBoxRelax : public Fix { public: FixBoxRelax(class LAMMPS *, int, char **); ~FixBoxRelax(); int setmask(); void init(); double min_energy(double *); void min_store(); void min_clearstore(); void min_pushstore(); void min_popstore(); int min_reset_ref(); void min_step(double, double *); double max_alpha(double *); int min_dof(); int modify_param(int, char **); private: int p_flag[6]; int pstyle,pcouple,allremap; int dimension; double p_target[6],p_current[6]; double vol0,xprdinit,yprdinit,zprdinit; double vmax,pv2e,pflagsum; int kspace_flag; int current_lifo; // LIFO stack pointer double boxlo0[2][3]; // box bounds at start of line search double boxhi0[2][3]; double boxtilt0[2][3]; // xy,xz,yz tilts at start of line search double s0[3]; // scale matrix at start of line search double ds[6]; // increment in scale matrix char *id_temp,*id_press; class Compute *temperature,*pressure; int tflag,pflag; int nrigid; int *rfix; double sigma[6]; // scaled target stress double utsigma[3]; // weighting for upper-tri elements // of modified sigma int sigmamod_flag; // 1 if modified sigma to be used double fdev[6]; // Deviatoric force on cell int deviatoric_flag; // 0 if target stress tensor is hydrostatic double h0[6]; // h_inv of 
reference (zero strain) box double h0_inv[6]; // h_inv of reference (zero strain) box int nreset_h0; // interval for resetting h0 double p_hydro; // hydrostatic component of target stress void remap(); void couple(); void compute_sigma(); void compute_deviatoric(); double compute_strain_energy(); void compute_press_target(); double compute_scalar(); }; } #endif #endif
browndeer/lammps-ocl
src/fix_box_relax.h
C
gpl-2.0
2,753
/* * Copyright (c) International Business Machines Corp., 2006 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * * Author: Artem Bityutskiy (Битюцкий Артём) */ /* * Here we keep all the UBI debugging stuff which should normally be disabled * and compiled-out, but it is extremely helpful when hunting bugs or doing big * changes. */ #include "ubi-barebox.h" #ifdef CONFIG_MTD_UBI_DEBUG_MSG #include "ubi.h" /** * ubi_dbg_dump_ec_hdr - dump an erase counter header. * @ec_hdr: the erase counter header to dump */ void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) { dbg_msg("erase counter header dump:"); dbg_msg("magic %#08x", be32_to_cpu(ec_hdr->magic)); dbg_msg("version %d", (int)ec_hdr->version); dbg_msg("ec %llu", (long long)be64_to_cpu(ec_hdr->ec)); dbg_msg("vid_hdr_offset %d", be32_to_cpu(ec_hdr->vid_hdr_offset)); dbg_msg("data_offset %d", be32_to_cpu(ec_hdr->data_offset)); dbg_msg("hdr_crc %#08x", be32_to_cpu(ec_hdr->hdr_crc)); dbg_msg("erase counter header hexdump:"); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, ec_hdr, UBI_EC_HDR_SIZE, 1); } /** * ubi_dbg_dump_vid_hdr - dump a volume identifier header. 
* @vid_hdr: the volume identifier header to dump */ void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) { dbg_msg("volume identifier header dump:"); dbg_msg("magic %08x", be32_to_cpu(vid_hdr->magic)); dbg_msg("version %d", (int)vid_hdr->version); dbg_msg("vol_type %d", (int)vid_hdr->vol_type); dbg_msg("copy_flag %d", (int)vid_hdr->copy_flag); dbg_msg("compat %d", (int)vid_hdr->compat); dbg_msg("vol_id %d", be32_to_cpu(vid_hdr->vol_id)); dbg_msg("lnum %d", be32_to_cpu(vid_hdr->lnum)); dbg_msg("leb_ver %u", be32_to_cpu(vid_hdr->leb_ver)); dbg_msg("data_size %d", be32_to_cpu(vid_hdr->data_size)); dbg_msg("used_ebs %d", be32_to_cpu(vid_hdr->used_ebs)); dbg_msg("data_pad %d", be32_to_cpu(vid_hdr->data_pad)); dbg_msg("sqnum %llu", (unsigned long long)be64_to_cpu(vid_hdr->sqnum)); dbg_msg("hdr_crc %08x", be32_to_cpu(vid_hdr->hdr_crc)); dbg_msg("volume identifier header hexdump:"); } /** * ubi_dbg_dump_vol_info- dump volume information. * @vol: UBI volume description object */ void ubi_dbg_dump_vol_info(const struct ubi_volume *vol) { dbg_msg("volume information dump:"); dbg_msg("vol_id %d", vol->vol_id); dbg_msg("reserved_pebs %d", vol->reserved_pebs); dbg_msg("alignment %d", vol->alignment); dbg_msg("data_pad %d", vol->data_pad); dbg_msg("vol_type %d", vol->vol_type); dbg_msg("name_len %d", vol->name_len); dbg_msg("usable_leb_size %d", vol->usable_leb_size); dbg_msg("used_ebs %d", vol->used_ebs); dbg_msg("used_bytes %lld", vol->used_bytes); dbg_msg("last_eb_bytes %d", vol->last_eb_bytes); dbg_msg("corrupted %d", vol->corrupted); dbg_msg("upd_marker %d", vol->upd_marker); if (vol->name_len <= UBI_VOL_NAME_MAX && strnlen(vol->name, vol->name_len + 1) == vol->name_len) { dbg_msg("name %s", vol->name); } else { dbg_msg("the 1st 5 characters of the name: %c%c%c%c%c", vol->name[0], vol->name[1], vol->name[2], vol->name[3], vol->name[4]); } } /** * ubi_dbg_dump_vtbl_record - dump a &struct ubi_vtbl_record object. 
* @r: the object to dump * @idx: volume table index */ void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx) { int name_len = be16_to_cpu(r->name_len); dbg_msg("volume table record %d dump:", idx); dbg_msg("reserved_pebs %d", be32_to_cpu(r->reserved_pebs)); dbg_msg("alignment %d", be32_to_cpu(r->alignment)); dbg_msg("data_pad %d", be32_to_cpu(r->data_pad)); dbg_msg("vol_type %d", (int)r->vol_type); dbg_msg("upd_marker %d", (int)r->upd_marker); dbg_msg("name_len %d", name_len); if (r->name[0] == '\0') { dbg_msg("name NULL"); return; } if (name_len <= UBI_VOL_NAME_MAX && strnlen(&r->name[0], name_len + 1) == name_len) { dbg_msg("name %s", &r->name[0]); } else { dbg_msg("1st 5 characters of the name: %c%c%c%c%c", r->name[0], r->name[1], r->name[2], r->name[3], r->name[4]); } dbg_msg("crc %#08x", be32_to_cpu(r->crc)); } /** * ubi_dbg_dump_sv - dump a &struct ubi_scan_volume object. * @sv: the object to dump */ void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv) { dbg_msg("volume scanning information dump:"); dbg_msg("vol_id %d", sv->vol_id); dbg_msg("highest_lnum %d", sv->highest_lnum); dbg_msg("leb_count %d", sv->leb_count); dbg_msg("compat %d", sv->compat); dbg_msg("vol_type %d", sv->vol_type); dbg_msg("used_ebs %d", sv->used_ebs); dbg_msg("last_data_size %d", sv->last_data_size); dbg_msg("data_pad %d", sv->data_pad); } /** * ubi_dbg_dump_seb - dump a &struct ubi_scan_leb object. * @seb: the object to dump * @type: object type: 0 - not corrupted, 1 - corrupted */ void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type) { dbg_msg("eraseblock scanning information dump:"); dbg_msg("ec %d", seb->ec); dbg_msg("pnum %d", seb->pnum); if (type == 0) { dbg_msg("lnum %d", seb->lnum); dbg_msg("scrub %d", seb->scrub); dbg_msg("sqnum %llu", seb->sqnum); dbg_msg("leb_ver %u", seb->leb_ver); } } /** * ubi_dbg_dump_mkvol_req - dump a &struct ubi_mkvol_req object. 
* @req: the object to dump */ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req) { char nm[17]; dbg_msg("volume creation request dump:"); dbg_msg("vol_id %d", req->vol_id); dbg_msg("alignment %d", req->alignment); dbg_msg("bytes %lld", (long long)req->bytes); dbg_msg("vol_type %d", req->vol_type); dbg_msg("name_len %d", req->name_len); memcpy(nm, req->name, 16); nm[16] = 0; dbg_msg("the 1st 16 characters of the name: %s", nm); } #endif /* CONFIG_MTD_UBI_DEBUG_MSG */
jkent/mini210s-barebox
drivers/mtd/ubi/debug.c
C
gpl-2.0
6,438
using PowerPointLabs.ActionFramework.Common.Attribute; using PowerPointLabs.ActionFramework.Common.Interface; using PowerPointLabs.EffectsLab; using PowerPointLabs.TextCollection; namespace PowerPointLabs.ActionFramework.EffectsLab { [ExportLabelRibbonId(EffectsLabText.BlurrinessTag)] class EffectsLabBlurrinessLabelHandler : LabelHandler { protected override string GetLabel(string ribbonId) { if (ribbonId.Contains(CommonText.DynamicMenuButtonId)) { return EffectsLabText.BlurrinessButtonLabel; } if (ribbonId.Contains(EffectsLabText.BlurrinessCustom)) { int percentage = 0; if (ribbonId.StartsWith(EffectsLabText.BlurrinessFeatureSelected)) { percentage = EffectsLabSettings.CustomPercentageSelected; } else if (ribbonId.StartsWith(EffectsLabText.BlurrinessFeatureRemainder)) { percentage = EffectsLabSettings.CustomPercentageRemainder; } else if (ribbonId.StartsWith(EffectsLabText.BlurrinessFeatureBackground)) { percentage = EffectsLabSettings.CustomPercentageBackground; } return EffectsLabText.BlurrinessCustomPrefixLabel + " (" + percentage + "%)"; } int startIndex = ribbonId.IndexOf(CommonText.DynamicMenuOptionId) + CommonText.DynamicMenuOptionId.Length; string percentageText = ribbonId.Substring(startIndex, ribbonId.Length - startIndex); return percentageText + "% " + EffectsLabText.BlurrinessTag; } } }
initialshl/PowerPointLabs
PowerPointLabs/PowerPointLabs/ActionFramework/EffectsLab/BlurMenuContent/EffectsLabBlurrinessLabelHandler.cs
C#
gpl-2.0
1,728
/* Copyright (C) 2016 by the Battle for Wesnoth Project http://www.wesnoth.org/ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY. See the COPYING file for more details. */ #ifndef GUI_DIALOGS_STATISTICS_DIALOG_HPP_INCLUDED #define GUI_DIALOGS_STATISTICS_DIALOG_HPP_INCLUDED #include "gui/dialogs/modal_dialog.hpp" #include "statistics.hpp" class CVideo; class team; namespace gui2 { class menu_button; namespace dialogs { class statistics_dialog : public modal_dialog { public: statistics_dialog(const team& current_team); static void display(const team& current_team, CVideo& video) { statistics_dialog(current_team).show(video); } private: /** Inherited from modal_dialog, implemented by REGISTER_DIALOG. */ virtual const std::string& window_id() const; /** Inherited from modal_dialog. */ void pre_show(window& window); /** * Picks out the stats structure that was selected for displaying. */ inline const statistics::stats & current_stats(); void add_stat_row(window& window, const std::string& type, const statistics::stats::str_int_map& value, const bool has_cost = true); void add_damage_row( window& window, const std::string& type, const long long& damage, const long long& expected, const long long& turn_damage, const long long& turn_expected, const bool show_this_turn); void update_lists(window& window); void on_primary_list_select(window& window); void on_scenario_select(window& window); void on_tab_select(window& window); const team& current_team_; const statistics::stats campaign_; const statistics::levels scenarios_; size_t scenario_index_; std::vector<const statistics::stats::str_int_map*> main_stat_table_; }; } // namespace dialogs } // namespace gui2 #endif /* ! 
GUI_DIALOGS_STATISTICS_DIALOG_HPP_INCLUDED */
gunchleoc/wesnoth
src/gui/dialogs/statistics_dialog.hpp
C++
gpl-2.0
2,105
<?php /** * Magento * * NOTICE OF LICENSE * * This source file is subject to the Open Software License (OSL 3.0) * that is bundled with this package in the file LICENSE.txt. * It is also available through the world-wide-web at this URL: * http://opensource.org/licenses/osl-3.0.php * If you did not receive a copy of the license and are unable to * obtain it through the world-wide-web, please send an email * to license@magento.com so we can send you a copy immediately. * * DISCLAIMER * * Do not edit or add to this file if you wish to upgrade Magento to newer * versions in the future. If you wish to customize Magento for your * needs please refer to http://www.magento.com for more information. * * @category Mage * @package Mage_Dataflow * @copyright Copyright (c) 2006-2017 X.commerce, Inc. and affiliates (http://www.magento.com) * @license http://opensource.org/licenses/osl-3.0.php Open Software License (OSL 3.0) */ /** * Convert php serialize parser * * @category Mage * @package Mage_Dataflow * @author Magento Core Team <core@magentocommerce.com> */ class Mage_Dataflow_Model_Convert_Parser_Serialize extends Mage_Dataflow_Model_Convert_Parser_Abstract { public function parse() { $this->setData(unserialize($this->getData())); return $this; } public function unparse() { $this->setData(serialize($this->getData())); return $this; } }
miguelangelramirez/magento.dev
app/code/core/Mage/Dataflow/Model/Convert/Parser/Serialize.php
PHP
gpl-2.0
1,461
/* * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. 
*/ package org.graalvm.compiler.hotspot.aarch64; import static jdk.vm.ci.aarch64.AArch64.zr; import org.graalvm.compiler.asm.Label; import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler; import org.graalvm.compiler.lir.LIRInstructionClass; import org.graalvm.compiler.lir.Opcode; import org.graalvm.compiler.lir.aarch64.AArch64LIRInstruction; import org.graalvm.compiler.lir.asm.CompilationResultBuilder; import jdk.vm.ci.code.Register; @Opcode("CRUNTIME_CALL_EPILOGUE") public class AArch64HotSpotCRuntimeCallEpilogueOp extends AArch64LIRInstruction { public static final LIRInstructionClass<AArch64HotSpotCRuntimeCallEpilogueOp> TYPE = LIRInstructionClass.create(AArch64HotSpotCRuntimeCallEpilogueOp.class); private final int threadLastJavaSpOffset; private final int threadLastJavaPcOffset; private final Register thread; @SuppressWarnings("unused") private final Label label; public AArch64HotSpotCRuntimeCallEpilogueOp(int threadLastJavaSpOffset, int threadLastJavaPcOffset, Register thread, Label label) { super(TYPE); this.threadLastJavaSpOffset = threadLastJavaSpOffset; this.threadLastJavaPcOffset = threadLastJavaPcOffset; this.thread = thread; this.label = label; } @Override public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) { // Reset last Java frame: masm.str(64, zr, masm.makeAddress(thread, threadLastJavaSpOffset, 8)); masm.str(64, zr, masm.makeAddress(thread, threadLastJavaPcOffset, 8)); } }
md-5/jdk10
src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64HotSpotCRuntimeCallEpilogueOp.java
Java
gpl-2.0
2,615
/*************************************************************************** file : OsgSky.h created : Mon Aug 21 18:24:02 CEST 2012 copyright : (C) 2012 by Xavier Bertaux email : bertauxx@yahoo.fr version : $Id$ ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #ifndef _OSGSKY_H #define _OSGSKY_H #include <vector> #include <string> #include <osg/ref_ptr> #include <osg/MatrixTransform> #include <osg/Node> #include <osg/Switch> #include "OsgCloud.h" #include "OsgDome.h" #include "OsgMoon.h" #include "OsgSun.h" #include "OsgStars.h" using std::vector; class SDCloudLayer; //class SDCloudLayerList; class SDSun; class SDMoon; class SDStars; class SDSkyDome; class SDSky; /*typedef struct { osg::Vec3d pos; double spin; double gst; double sun_dist; double moon_dist; double sun_angle; } SDSkyState; typedef struct { osg::Vec3f sky_color; osg::Vec3f adj_sky_color; osg::Vec3f fog_color; osg::Vec3f cloud_color; double sun_angle, moon_angle; } SDSkyColor;*/ enum NodeMask { BACKGROUND_BIT = (1 << 11), MODEL_BIT = (1 << 12), }; class SDSky { private: typedef std::vector<SDCloudLayer *> layer_list_type; typedef layer_list_type::iterator layer_list_iterator; typedef layer_list_type::const_iterator layer_list_const_iterator; // components of the sky SDSkyDome* dome; SDSun* sun; SDMoon* moon; SDStars* planets; SDStars* stars; layer_list_type cloud_layers; osg::ref_ptr<osg::Group> pre_root, cloud_root; osg::ref_ptr<osg::Switch> pre_selector; osg::ref_ptr<osg::Group> pre_transform; // visibility float visibility; float effective_visibility; float 
minimum_sky_visibility; int in_cloud; int cur_layer_pos; bool in_puff; double puff_length; double puff_progression; double ramp_up; double ramp_down; // 3D clouds enabled bool clouds_3d_enabled; // 3D cloud density double clouds_3d_density; public: /** Constructor */ SDSky( void ); /** Destructor */ ~SDSky( void ); void build( std::string tex_path, double h_radius, double v_radius, double sun_size, double sun_dist, double moon_size, double moon_dist, int nplanets, osg::Vec3d *planet_data, int nstars, osg::Vec3d *star_data ); bool repaint (osg::Vec3f &sky_color, osg::Vec3f &fog_color, osg::Vec3f &cloud_color, double sun_angle, double moon_angle, int nplanets, osg::Vec3d *planet_data, int nstars, osg::Vec3d *star_data); bool reposition(osg::Vec3 &view_pos, double spin, /*double gst,*/ double dt); void modify_vis( float alt, float time_factor ); osg::Node* getPreRoot() { return pre_root.get(); } osg::Node* getCloudRoot() { return cloud_root.get(); } void texture_path( const std::string& path ); inline void enable() { pre_selector->setValue(0, 1); } inline void disable() { pre_selector->setValue(0, 0); } inline osg::Vec4f get_sun_color() { return sun->get_color(); } inline osg::Vec4f get_scene_color() { return sun->get_scene_color(); } void add_cloud_layer (SDCloudLayer * layer); const SDCloudLayer * get_cloud_layer (int i) const; SDCloudLayer * get_cloud_layer (int i); int get_cloud_layer_count () const; void setMA(double angle) { moon->setMoonAngle(angle); } double getMA() { return moon->getMoonAngle(); } void setMR(double rotation) { moon->setMoonRotation( rotation); } double getMR() { return moon->getMoonRotation(); } void setMRA( double ra ) { moon->setMoonRightAscension( ra ); } double getMRA() { return moon->getMoonRightAscension(); } void setMD( double decl ) { moon->setMoonDeclination( decl ); } double getMD() { return moon->getMoonDeclination(); } void setMDist( double dist ) { moon->setMoonDist(dist); } double getMDist() { return moon->getMoonDist(); } void 
setSA(double angle) { sun->setSunAngle(angle); } double getSA() { return sun->getSunAngle(); } void setSR(double rotation) { sun->setSunRotation( rotation ); } double getSR() { return sun->getSunRotation(); } void setSRA(double ra) { sun->setSunRightAscension( ra ); } double getSRA() { return sun->getSunRightAscension(); } void setSD( double decl ) { sun->setSunDeclination( decl ); } double getSD() { return sun->getSunDeclination(); } void setSDistance( double dist ) { sun->setSunDistance( dist ); } double getSDistance() { return sun->getSunDistance(); } inline float get_visibility() const { return effective_visibility; } inline void set_visibility( float v ) { effective_visibility = visibility = (v <= 25.0) ? 25.0 : v; } inline SDSun * getSun(){return sun;} inline osg::Vec3f sunposition() {return sun->getSunPosition();} //virtual double get_3dCloudDensity() const; //virtual void set_3dCloudDensity(double density); //virtual float get_3dCloudVisRange() const; //virtual void set_3dCloudVisRange(float vis); }; #endif // _OSGSKY_H
Donald-Otto/SpeedDreamsForkProject
src/modules/graphic/osggraph/OsgSky/OsgSky.h
C
gpl-2.0
5,757
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/gpio_event.h> #include <linux/i2c-gpio.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <mach/board.h> #include <mach/msm_iomap.h> #include <mach/msm_hsusb.h> #include <mach/rpc_hsusb.h> #include <mach/rpc_pmapp.h> #include <mach/usbdiag.h> #include <mach/usb_gadget_fserial.h> #include <mach/msm_memtypes.h> #include <mach/msm_serial_hs.h> #include <linux/usb/android.h> #include <linux/platform_device.h> #include <linux/io.h> //#include <mach/gpio-v1.h> #ifdef CONFIG_MACH_JENA #include <mach/gpio_jena.h> #elif defined(CONFIG_MACH_AMAZING_CDMA) #include <mach/gpio_amazing_cdma.h> #else #include <mach/gpio_trebon.h> #endif #include <mach/pmic.h> #include <mach/socinfo.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <asm/mach/mmc.h> #include <linux/i2c.h> #include <linux/i2c/sx150x.h> #include <linux/gpio.h> #include <linux/android_pmem.h> #include <linux/bootmem.h> #include <linux/mfd/marimba.h> #include <mach/vreg.h> #include <linux/power_supply.h> #include <linux/regulator/consumer.h> #include <mach/rpc_pmapp.h> #include <mach/msm_battery.h> #include <linux/smsc911x.h> #include <linux/atmel_maxtouch.h> //todo*** remove atmel changes #include "devices.h" #include "timer.h" #include "board-msm7x27a-regulator.h" #include "devices-msm7x2xa.h" #include "pm.h" #ifdef CONFIG_SAMSUNG_JACK #include <linux/sec_jack.h> #endif #include 
<mach/rpc_server_handset.h> #include <mach/socinfo.h> #include <linux/fsaxxxx_usbsw.h> #include "proc_comm.h" #include "pm-boot.h" #include "board-msm7627a.h" #ifdef CONFIG_PROXIMITY_SENSOR #include <linux/gp2a.h> #endif #ifdef CONFIG_TOUCHSCREEN_MELFAS_TS #include <linux/i2c/melfas_ts.h> #endif #ifdef CONFIG_MAX17043_FUELGAUGE #include <linux/fuelgauge_max17043.h> #endif #ifdef CONFIG_CHARGER_SMB328A #include <linux/smb328a_charger.h> #endif #ifdef CONFIG_TOUCHSCREEN_MELFAS_TS #define MELFAS_TSP_ADDR 0x48 // melfas TSP slave address #endif #define PMEM_KERNEL_EBI1_SIZE 0x3A000 #define MSM_PMEM_AUDIO_SIZE 0x5B000 int charging_boot; EXPORT_SYMBOL(charging_boot); int fota_boot; EXPORT_SYMBOL(fota_boot); #ifdef CONFIG_MACH_AMAZING_CDMA #define ENABLE_WLAN_LDO #endif #define ENABLE_WLAN_LDO #ifdef ENABLE_WLAN_LDO struct regulator *wlan_reg; #endif #define WLAN_33V_CONTROL_FOR_BT_ANTENNA #define WLAN_OK (0) #define WLAN_ERROR (-1) #ifdef WLAN_33V_CONTROL_FOR_BT_ANTENNA #define WLAN_33V_WIFI_FLAG (0x01) #define WLAN_33V_BT_FLAG (0x02) int wlan_33v_flag; #endif #ifdef CONFIG_SAMSUNG_JACK #define GPIO_JACK_S_35 48 #define GPIO_SEND_END 92 static struct sec_jack_zone jack_zones[] = { [0] = { .adc_high = 3, .delay_ms = 10, .check_count = 5, .jack_type = SEC_HEADSET_3POLE, }, [1] = { .adc_high = 99, .delay_ms = 10, .check_count = 10, .jack_type = SEC_HEADSET_3POLE, }, [2] = { .adc_high = 9999, .delay_ms = 10, .check_count = 5, .jack_type = SEC_HEADSET_4POLE, }, }; int get_msm7x27a_det_jack_state(void) { /* Active Low */ return(gpio_get_value(GPIO_JACK_S_35)) ^ 1; } EXPORT_SYMBOL(get_msm7x27a_det_jack_state); static int get_msm7x27a_send_key_state(void) { return current_key_state; /* refer from rpc_server_handset */ } #define SMEM_PROC_COMM_MICBIAS_ONOFF PCOM_OEM_MICBIAS_ONOFF #define SMEM_PROC_COMM_MICBIAS_ONOFF_REG5 PCOM_OEM_MICBIAS_ONOFF_REG5 #define SMEM_PROC_COMM_GET_ADC PCOM_OEM_SAMSUNG_GET_ADC enum { SMEM_PROC_COMM_GET_ADC_BATTERY = 0x0, SMEM_PROC_COMM_GET_ADC_TEMP, 
SMEM_PROC_COMM_GET_ADC_VF, SMEM_PROC_COMM_GET_ADC_ALL, // data1 : VF(MSB 2 bytes) vbatt_adc(LSB 2bytes), data2 : temp_adc SMEM_PROC_COMM_GET_ADC_EAR_ADC, // 3PI_ADC SMEM_PROC_COMM_GET_ADC_MAX, }; enum { SMEM_PROC_COMM_MICBIAS_CONTROL_OFF = 0x0, SMEM_PROC_COMM_MICBIAS_CONTROL_ON, SMEM_PROC_COMM_MICBIAS_CONTROL_MAX }; static void set_msm7x27a_micbias_state_reg5(bool state) { /* int res = 0; * int data1 = 0; * int data2 = 0; * if (!state) * { * data1 = SMEM_PROC_COMM_MICBIAS_CONTROL_OFF; * res = msm_proc_comm(SMEM_PROC_COMM_MICBIAS_ONOFF_REG5, &data1, &data2); * if(res < 0) * { * pr_err("sec_jack: micbias_reg5 %s fail \n",state?"on":"off"); * } * } */ } static bool cur_state = false; static bool proximity_init; static void set_msm7x27a_micbias_state(bool state) { if(cur_state == state) { pr_info("sec_jack : earmic_bias same as cur_state\n"); return; } if(state) { pmic_hsed_enable(PM_HSED_CONTROLLER_0, PM_HSED_ENABLE_ALWAYS); msleep(130); cur_state = true; } else { pmic_hsed_enable(PM_HSED_CONTROLLER_0, PM_HSED_ENABLE_OFF); cur_state = false; } report_headset_status(state); pr_info("sec_jack : earmic_bias %s\n", state?"on":"off"); } static int sec_jack_get_adc_value(void) { return current_jack_type; /* refer from rpc_server_handset */ } #if 0 static int sec_jack_read_adc(int channel, int *adc_data) { int ret; void *h; struct adc_chan_result adc_chan_result; struct completion conv_complete_evt; pr_info("%s: called for %d\n", __func__, channel); ret = adc_channel_open(channel, &h); if (ret) { pr_err("%s: couldnt open channel %d ret=%d\n", __func__, channel, ret); goto out; } init_completion(&conv_complete_evt); ret = adc_channel_request_conv(h, &conv_complete_evt); if (ret) { pr_err("%s: couldnt request conv channel %d ret=%d\n", __func__, channel, ret); goto out; } // wait_for_completion(&conv_complete_evt); ret = wait_for_completion_timeout(&conv_complete_evt, 10*HZ); if (!ret) { pr_err("%s: wait interrupted channel %d ret=%d\n", __func__, channel, ret); //rohbt_ics 
pm8058_xoadc_clear_recentQ(); goto out; } ret = adc_channel_read_result(h, &adc_chan_result); if (ret) { pr_err("%s: couldnt read result channel %d ret=%d\n", __func__, channel, ret); goto out; } ret = adc_channel_close(h); if (ret) { pr_err("%s: couldnt close channel %d ret=%d\n", __func__, channel, ret); } if(adc_data) *adc_data = adc_chan_result.measurement; pr_info("%s ADC : %d\n", __func__, adc_chan_result.physical); return adc_chan_result.physical; out: pr_err("%s: Failure for %d\n", __func__, channel); return 0;//to prevent infinite loop in determine_jack_type() -EINVAL; } static int sec_jack_get_adc_value(void) { int temp_adc = 0; int adc_data = 0; //return(gpio_get_value_cansleep(PM8058_GPIO_PM_TO_SYS(PMIC_GPIO_SHORT_SENDEND))) ^ 1; temp_adc = sec_jack_read_adc(5, &adc_data); return temp_adc; } #endif void sec_jack_gpio_init(void) { gpio_tlmm_config(GPIO_CFG(GPIO_JACK_S_35, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),GPIO_CFG_ENABLE); //gpio 48 JACK_INT_N if(gpio_request(GPIO_JACK_S_35, "h2w_detect")<0) pr_err("sec_jack:gpio_request fail\n"); if(gpio_direction_input(GPIO_JACK_S_35)<0) pr_err("sec_jack:gpio_direction fail\n"); } static struct sec_jack_platform_data sec_jack_data = { .get_det_jack_state = get_msm7x27a_det_jack_state, .get_send_key_state = get_msm7x27a_send_key_state, .set_micbias_state = set_msm7x27a_micbias_state, .set_micbias_state_reg5 = set_msm7x27a_micbias_state_reg5, .get_adc_value = sec_jack_get_adc_value, .zones = jack_zones, .num_zones = ARRAY_SIZE(jack_zones), .det_int = MSM_GPIO_TO_INT(GPIO_JACK_S_35), .send_int = MSM_GPIO_TO_INT(GPIO_SEND_END), }; static struct platform_device sec_device_jack = { .name = "sec_jack", .id = -1, .dev = { .platform_data = &sec_jack_data, }, }; #endif #if defined(CONFIG_GPIO_SX150X) enum { SX150X_CORE, }; static struct sx150x_platform_data sx150x_data[] __initdata = { [SX150X_CORE] = { .gpio_base = GPIO_CORE_EXPANDER_BASE, .oscio_is_gpo = false, .io_pullup_ena = 0, .io_pulldn_ena = 0x02, 
.io_open_drain_ena = 0xfef8, .irq_summary = -1, }, }; #endif extern unsigned int board_hw_revision; extern unsigned int kernel_uart_flag; #ifndef ATH_POLLING static void (*wlan_status_notify_cb)(int card_present, void *dev_id); void *wlan_devid; static int register_wlan_status_notify(void (*callback)(int card_present, void *dev_id), void *dev_id) { printk("%s --enter\n", __func__); wlan_status_notify_cb = callback; wlan_devid = dev_id; return 0; } static unsigned int wlan_status(struct device *dev) { int rc; printk("%s entered\n", __func__); rc = gpio_get_value(GPIO_WLAN_RESET_N/*gpio_wlan_reset_n*/); return rc; } #endif /* ATH_POLLING */ static struct platform_device msm_wlan_ar6000_pm_device = { .name = "wlan_ar6000_pm_dev", .id = -1, }; static struct platform_device msm_device_pmic_leds = { .name = "pmic-leds", .id = -1, }; #if defined(CONFIG_I2C) && defined(CONFIG_GPIO_SX150X) static struct i2c_board_info core_exp_i2c_info[] __initdata = { { I2C_BOARD_INFO("sx1509q", 0x3e), }, }; static struct platform_device msm_vibrator_device = { .name = "msm_vibrator", .id = -1, }; #ifdef CONFIG_PROXIMITY_SENSOR int LED_onoff(int on) { int rc = 0; printk("%s onoff:%d\n", __func__, on); rc = gpio_request(GPIO_PROX_LDO, "GPIO_PROX_LDO"); if (rc < 0) pr_err("failed to request prox_ldo\n"); rc = gpio_direction_output(GPIO_PROX_LDO, on); if (rc < 0) pr_err("failed to direction_output prox_ldo\n"); gpio_free(GPIO_PROX_LDO); return 0; } static int gp2a_init() { gpio_tlmm_config(GPIO_CFG(GPIO_PROX_LDO, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA), GPIO_CFG_ENABLE); gpio_tlmm_config(GPIO_CFG(GPIO_PROXI_INT, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA), GPIO_CFG_ENABLE); return 0; } static int gp2a_power(bool on) { return 0; } static struct gp2a_platform_data gp2a_pdata = { .p_out = GPIO_PROXI_INT, .power = gp2a_power, }; #endif static struct i2c_board_info sensor_devices[] = { #ifdef CONFIG_PROXIMITY_SENSOR { I2C_BOARD_INFO("gp2a",0x44), .platform_data = &gp2a_pdata, }, 
	/* Remaining sensor_devices[] entries (table opens on the line above):
	 * accelerometers plus optional magnetometer. */
#endif
	{
		I2C_BOARD_INFO("bma222", 0x08),
	},
	{
		I2C_BOARD_INFO("bma222e", 0x18),
	},
#ifdef CONFIG_SENSORS_HSCD
	{
		I2C_BOARD_INFO("hscd_i2c", 0x0c),
	},
#endif
};

#if defined(CONFIG_SENSORS_HSCD) || defined(CONFIG_PROXIMITY_SENSOR)
/* Bit-banged I2C bus (adapter id 4) carrying the sensors above. */
static struct i2c_gpio_platform_data sensor_i2c_gpio_data = {
	.sda_pin = GPIO_SENSOR_SDA,
	.scl_pin = GPIO_SENSOR_SCL,
	.udelay = 1,
};

static struct platform_device sensor_i2c_gpio_device = {
	.name = "i2c-gpio",
	.id = 4,
	.dev = {
		.platform_data = &sensor_i2c_gpio_data,
	},
};
#endif

#if 0
/* Dead code: bit-banged touch-screen bus kept for reference. */
static struct i2c_gpio_platform_data touch_i2c_gpio_data = {
	.sda_pin = GPIO_TSP_SDA,
	.scl_pin = GPIO_TSP_SCL,
	.udelay = 1,
};

static struct platform_device touch_i2c_gpio_device = {
	.name = "i2c-gpio",
	.id = 2,
	.dev = {
		.platform_data = &touch_i2c_gpio_data,
	},
};

static struct i2c_board_info touch_i2c_devices[] = {
	{
		I2C_BOARD_INFO("zinitix_isp", 0x50),
	},
	{
		I2C_BOARD_INFO("sec_touchscreen", 0x20),
		.irq = MSM_GPIO_TO_INT( GPIO_TOUCH_IRQ ),
	},
};
#endif

/* Register the IO-expander on the GSBI1 QUP bus and the sensors on the
 * bit-banged bus 4. SURF boards get a different open-drain mask first. */
static void __init register_i2c_devices(void)
{
	if (machine_is_msm7x27a_surf() || machine_is_msm7625a_surf())
		sx150x_data[SX150X_CORE].io_open_drain_ena = 0xe0f0;

	core_exp_i2c_info[0].platform_data = &sx150x_data[SX150X_CORE];

	i2c_register_board_info(MSM_GSBI1_QUP_I2C_BUS_ID,
			core_exp_i2c_info, ARRAY_SIZE(core_exp_i2c_info));
#if defined(CONFIG_SENSORS_HSCD) || defined(CONFIG_PROXIMITY_SENSOR)
	i2c_register_board_info(4,sensor_devices, ARRAY_SIZE(sensor_devices));
	printk("registration of devices");
#endif
}
#endif

/* Battery/charger glue: callback table handed over by the msm-battery
 * driver, plus the last cable/accessory/OVP states latched before the
 * driver registered (replayed in msm_battery_register_callback()). */
struct msm_battery_callback *charger_callbacks;
static enum cable_type_t set_cable_status;
static enum acc_type_t set_acc_status;
static enum ovp_type_t set_ovp_status;

#ifdef CONFIG_CHARGER_SMB328A
/* Forward a "charge done" event from the SMB328A into the battery driver. */
static void smb328a_tx_charge_done(void)
{
	if (charger_callbacks && charger_callbacks->charge_done)
		charger_callbacks->charge_done(charger_callbacks);
}

/* function pointers are initialized in "smb328a_probe" */
static struct smb328a_platform_data smb328a_pdata = {
	.start_charging = NULL,
	.stop_charging = NULL,
	/* Remaining SMB328A tuning (struct opens on the line above):
	 * charge currents, input-current limits and termination current. */
	.get_vbus_status = NULL,
	.chg_curr_ta = SMB328A_CURR_600,
	.chg_curr_ta_event = SMB328A_CURR_600,
	.chg_curr_usb = SMB328A_CURR_500,
	.chg_ac_limit_ta = SMB328A_LIMIT_600,
	.chg_ac_limit_usb = SMB328A_LIMIT_450,
	.chg_term_curr = SMB328A_TERM_200,
	.tx_charge_done = smb328a_tx_charge_done,
};
#endif

/* Called by the msm-battery driver to hand over its callback table.
 * Replays any accessory/cable/OVP state that arrived from the FSA880
 * before the driver was ready. */
static void msm_battery_register_callback(struct msm_battery_callback *ptr)
{
	charger_callbacks = ptr;
	pr_info("[BATT] msm_battery_register_callback start\n");

	if ((set_acc_status != 0) && charger_callbacks && charger_callbacks->set_acc_type)
		charger_callbacks->set_acc_type(charger_callbacks, set_acc_status);
	if ((set_cable_status != 0) && charger_callbacks && charger_callbacks->set_cable)
		charger_callbacks->set_cable(charger_callbacks, set_cable_status);
	if ((set_ovp_status != 0) && charger_callbacks && charger_callbacks->set_ovp_type)
		charger_callbacks->set_ovp_type(charger_callbacks, set_ovp_status);
}

static u32 msm_calculate_batt_capacity(u32 current_voltage);

static struct msm_charger_data aries_charger = {
#ifdef CONFIG_CHARGER_SMB328A
	.charger_ic = &smb328a_pdata,
#endif
	.register_callbacks = msm_battery_register_callback,
};

/* Battery design limits in mV; sources: AC and USB. */
static struct msm_psy_batt_pdata msm_psy_batt_data = {
	.charger = &aries_charger,
	.voltage_min_design = 2800,
	.voltage_max_design = 4300,
	.avail_chg_sources = AC_CHG | USB_CHG ,
	.batt_technology = POWER_SUPPLY_TECHNOLOGY_LION,
};

static struct platform_device msm_batt_device = {
	.name = "msm-battery",
	.id = -1,
	.dev.platform_data = &msm_psy_batt_data,
};

/* static u32 msm_calculate_batt_capacity(u32 current_voltage) { u32 low_voltage = msm_psy_batt_data.voltage_min_design; u32 high_voltage = msm_psy_batt_data.voltage_max_design; return (current_voltage - low_voltage) * 100 / (high_voltage - low_voltage); }*/

/* Last cable type reported by the FSA880 micro-USB switch, exposed to
 * other drivers via fsa880_get_charger_status(). */
int fsa_cable_type = CABLE_TYPE_UNKNOWN;
int fsa880_get_charger_status(void);
int fsa880_get_charger_status(void)
{
	return fsa_cable_type;
}

/* Tell the modem (via proc_comm) which charger type is attached
 * (function body continues on the next line). */
void trebon_chg_connected(enum chg_type chgtype)
{
	char *chg_types[] = {"STD DOWNSTREAM PORT",
		/* Remainder of trebon_chg_connected(): chg_types[] opens on the
		 * previous line and is indexed by chgtype for logging. */
		"CARKIT", "DEDICATED CHARGER", "INVALID"};
	unsigned *data1 = NULL;
	unsigned *data2 = NULL;
	int ret = 0;

	switch (chgtype) {
	case USB_CHG_TYPE__SDP:
		ret = msm_proc_comm(PCOM_CHG_USB_IS_PC_CONNECTED, data1, data2);
		break;
	case USB_CHG_TYPE__WALLCHARGER:
		ret = msm_proc_comm(PCOM_CHG_USB_IS_CHARGER_CONNECTED, data1, data2);
		break;
	case USB_CHG_TYPE__INVALID:
		ret = msm_proc_comm(PCOM_CHG_USB_IS_DISCONNECTED, data1, data2);
		break;
	default:
		break;
	}
	if (ret < 0)
		pr_err("%s: connection err, ret=%d\n", __func__, ret);
	pr_info("\nCharger Type: %s\n", chg_types[chgtype]);
}

/* FSA880 callback: USB cable attached/detached. Latches the state so it
 * can be replayed, then forwards it to the battery driver if registered. */
static void jena_usb_cb(u8 attached, struct fsausb_ops *ops)
{
	pr_info("[BATT] [%s] Board file [FSA880]: USB Callback\n", __func__);
	set_acc_status = attached ? ACC_TYPE_USB : ACC_TYPE_NONE;
	if (charger_callbacks && charger_callbacks->set_acc_type)
		charger_callbacks->set_acc_type(charger_callbacks, set_acc_status);
	set_cable_status = attached ? CABLE_TYPE_USB : CABLE_TYPE_UNKNOWN;
	if (charger_callbacks && charger_callbacks->set_cable)
		charger_callbacks->set_cable(charger_callbacks, set_cable_status);
}

/* FSA880 callback: dedicated (TA) charger attached/detached. */
static void jena_charger_cb(u8 attached, struct fsausb_ops *ops)
{
	pr_info("[BATT] Board file [FSA880]: Charger Callback\n");
	set_acc_status = attached ? ACC_TYPE_CHARGER : ACC_TYPE_NONE;
	if (charger_callbacks && charger_callbacks->set_acc_type)
		charger_callbacks->set_acc_type(charger_callbacks, set_acc_status);
	set_cable_status = attached ? CABLE_TYPE_TA : CABLE_TYPE_UNKNOWN;
	if (charger_callbacks && charger_callbacks->set_cable)
		charger_callbacks->set_cable(charger_callbacks, set_cable_status);
}

/* FSA880 callback: desk dock -- log only on this board.
 * NOTE(review): the message says "Charger Callback" (copy/paste). */
static void jena_deskdock_cb(u8 attached, struct fsausb_ops *ops)
{
	pr_info("[BATT] Board file [FSA880]: Charger Callback\n");
}

/* FSA880 callback: JIG (factory cable) attached/detached
 * (latch continues on the next line). */
static void jena_jig_cb(u8 attached, struct fsausb_ops *ops)
{
	pr_info("[BATT] Board file [FSA880]: Jig Callback\n");
	set_acc_status = attached ?
		/* Tail of jena_jig_cb(): latch and forward the JIG state. */
		ACC_TYPE_JIG : ACC_TYPE_NONE;
	if (charger_callbacks && charger_callbacks->set_acc_type)
		charger_callbacks->set_acc_type(charger_callbacks, set_acc_status);
}

/* FSA880 callback: over-voltage-protection state changed. */
static void jena_ovp_cb(u8 attached, struct fsausb_ops *ops)
{
	pr_info("[BATT] Board file [FSA880]: OVP Callback\n");
	set_ovp_status = attached ? OVP_TYPE_OVP : OVP_TYPE_NONE;
	if (charger_callbacks && charger_callbacks->set_ovp_type)
		charger_callbacks->set_ovp_type(charger_callbacks, set_ovp_status);
}

/* check charger cable type for USB phy off */
/* NOTE(review): K&R empty parameter list -- should be (void). Only
 * referenced by the commented-out .chg_connect_type hookup below. */
static int checkChargerType()
{
	return set_cable_status;
}

static void jena_fsa880_reset_cb(void)
{
	pr_info(" [BATT] Board file [FSA880]: Reset Callback\n");
}

/* For uUSB Switch */
static struct fsausb_platform_data jena_fsa880_pdata = {
	.intb_gpio = MSM_GPIO_TO_INT(GPIO_MUSB_INT),
	.usb_cb = jena_usb_cb,
	.uart_cb = NULL,
	.charger_cb = jena_charger_cb,
	.deskdock_cb = jena_deskdock_cb,
	.jig_cb = jena_jig_cb,
	.ovp_cb = jena_ovp_cb,
	.reset_cb = jena_fsa880_reset_cb,
};

/* I2C 3 */
/* Bit-banged bus for the FSA880 micro-USB switch. */
static struct i2c_gpio_platform_data fsa880_i2c_gpio_data = {
	.sda_pin = GPIO_MUS_SDA,
	.scl_pin = GPIO_MUS_SCL,
};

static struct platform_device fsa880_i2c_gpio_device = {
	.name = "i2c-gpio",
	.id = 3,
	.dev = {
		.platform_data = &fsa880_i2c_gpio_data,
	},
};

static struct i2c_board_info fsa880_i2c_devices[] = {
	{
		I2C_BOARD_INFO("FSA9280", 0x4A >> 1),
		.platform_data = &jena_fsa880_pdata,
		.irq = MSM_GPIO_TO_INT(GPIO_MUSB_INT),
	},
};

#ifdef CONFIG_BQ27425_FUEL_GAUGE
#define FUEL_I2C_SCL 78
#define FUEL_I2C_SDA 79
/* Fuel_gauge */
static struct i2c_gpio_platform_data fuelgauge_i2c_gpio_data = {
	.sda_pin = FUEL_I2C_SDA,
	.scl_pin = FUEL_I2C_SCL,
};

static struct platform_device fuelgauge_i2c_gpio_device = {
	.name = "i2c-gpio",
	.id = 6,
	.dev = {
		.platform_data = &fuelgauge_i2c_gpio_data,
	},
};

static struct i2c_board_info fg_i2c_devices[] = {
	{
		I2C_BOARD_INFO( "bq27425", 0xAA>>1 ),
	},
};
#endif

#ifdef CONFIG_MAX17043_FUELGAUGE
#define FUEL_I2C_SCL 123
#define FUEL_I2C_SDA 124

/* MAX17043 glue: expose the fuel-gauge power_supply to the charger data
 * (function signature continues on the next line). */
static int
max17040_power_supply_register(struct device *parent, struct power_supply *psy)
{
	aries_charger.psy_fuelgauge = psy;
	return 0;
}

static void max17040_power_supply_unregister(struct power_supply *psy)
{
	aries_charger.psy_fuelgauge = NULL;
}

/* Fuel_gauge */
/* Bit-banged bus (adapter id 6) shared by the MAX17043 and the SMB328A. */
static struct i2c_gpio_platform_data fg_smb_i2c_gpio_data = {
	.sda_pin = FUEL_I2C_SDA,
	.scl_pin = FUEL_I2C_SCL,
};

static struct platform_device fg_smb_i2c_gpio_device = {
	.name = "i2c-gpio",
	.id = 6,
	.dev = {
		.platform_data = &fg_smb_i2c_gpio_data,
	},
};

static struct max17043_platform_data max17043_pdata = {
	.power_supply_register = max17040_power_supply_register,
	.power_supply_unregister = max17040_power_supply_unregister,
	.rcomp_value = 0xB01F,
	.psoc_full = 9400, // 94%
	.psoc_empty = 20, // 0.2%
};

static struct i2c_board_info fg_smb_i2c_devices[] = {
	{
		I2C_BOARD_INFO("max17043", 0x6D>>1),
		.platform_data = &max17043_pdata,
		.irq = MSM_GPIO_TO_INT(180),
	},
#ifdef CONFIG_CHARGER_SMB328A
	{
		I2C_BOARD_INFO("smb328a", (0x69 >> 1)),
		.platform_data = &smb328a_pdata,
		.irq = MSM_GPIO_TO_INT(18),
	},
#endif
};
#endif

/* QUP I2C pin muxing: *_io = GPIO (bus-recovery) mode, *_hw = hardware
 * QUP function. Two consecutive table entries per adapter. */
static struct msm_gpio qup_i2c_gpios_io[] = {
	{ GPIO_CFG(60, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), "qup_scl" },
	{ GPIO_CFG(61, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), "qup_sda" },
	{ GPIO_CFG(131, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), "qup_scl" },
	{ GPIO_CFG(132, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), "qup_sda" },
};
static struct msm_gpio qup_i2c_gpios_hw[] = {
	{ GPIO_CFG(60, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), "qup_scl" },
	{ GPIO_CFG(61, 1, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), "qup_sda" },
	{ GPIO_CFG(131, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), "qup_scl" },
	{ GPIO_CFG(132, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), "qup_sda" },
};

/* Switch a QUP I2C adapter's pins between HW and GPIO mode
 * (body continues on the next line). */
static void gsbi_qup_i2c_gpio_config(int adap_id, int config_type)
{
	int rc;

	if (adap_id < 0 || adap_id > 1)
		return;

	/* Each adapter gets 2 lines from the table */
	if
(config_type) rc = msm_gpios_request_enable(&qup_i2c_gpios_hw[adap_id*2], 2); else rc = msm_gpios_request_enable(&qup_i2c_gpios_io[adap_id*2], 2); if (rc < 0) pr_err("QUP GPIO request/enable failed: %d\n", rc); } static struct msm_i2c_platform_data msm_gsbi0_qup_i2c_pdata = { .clk_freq = 100000, .msm_i2c_config_gpio = gsbi_qup_i2c_gpio_config, }; static struct msm_i2c_platform_data msm_gsbi1_qup_i2c_pdata = { .clk_freq = 100000, .msm_i2c_config_gpio = gsbi_qup_i2c_gpio_config, }; #ifdef CONFIG_ARCH_MSM7X27A #if defined(_CONFIG_MACH_JENA) || defined(_CONFIG_MACH_TREBON) #define MSM_PMEM_MDP_SIZE 0x2300000 #define MSM_PMEM_ADSP_SIZE 0x1100000 #ifdef CONFIG_FB_MSM_TRIPLE_BUFFER /* prim = 320 x 480 x 4(bpp) x 3(pages) */ #define MSM_FB_SIZE 320 * 480 * 4 * 3 #else /* prim = 320 x 480 x 4(bpp) x 2(pages) */ #define MSM_FB_SIZE 320 * 480 * 4 * 2 #endif /* CONFIG_FB_MSM_TRIPLE_BUFFER */ #else #define MSM_PMEM_MDP_SIZE 0x1DD1000 #define MSM_PMEM_ADSP_SIZE 0x1000000 #ifdef CONFIG_FB_MSM_TRIPLE_BUFFER /* prim = 320 x 480 x 4(bpp) x 3(pages) */ #define MSM_FB_SIZE (320 * 480 * 4 * 3) #else /* prim = 320 x 480 x 4(bpp) x 2(pages) */ #define MSM_FB_SIZE (320 * 480 * 4 * 2) #endif /* CONFIG_FB_MSM_TRIPLE_BUFFER */ /*#define MSM_FB_SIZE 0x195000*/ #endif /* defined(_CONFIG_MACH_JENA) || ... 
*/
#endif /* CONFIG_ARCH_MSM7X27A */

static struct android_usb_platform_data android_usb_pdata = {
	.update_pid_and_serial_num = usb_diag_update_pid_and_serial_num,
};

static struct platform_device android_usb_device = {
	.name = "android_usb",
	.id = -1,
	.dev = {
		.platform_data = &android_usb_pdata,
	},
};

/* Parse "androidboot.boot_pause=" into the LPM-charging / FOTA boot flags.
 * Any value other than "batt" or "fota" clears both. */
static int __init boot_mode_boot(char *onoff)
{
	if (strcmp(onoff, "batt") == 0) {
		charging_boot = 1;
		fota_boot = 0;
		pr_info("%s[BATT]charging_boot: %d\n", __func__, charging_boot);
	} else if (strcmp(onoff, "fota") == 0) {
		fota_boot = 1;
		charging_boot = 0;
	} else {
		charging_boot = 0;
		fota_boot = 0;
	}
	return 1;
}
__setup("androidboot.boot_pause=", boot_mode_boot);

#ifdef CONFIG_USB_EHCI_MSM_72K
/* Drive the USB-host VBUS enable GPIO; failures to request are logged
 * and the function returns without touching the pin. */
static void msm_hsusb_vbus_power(unsigned phy_info, int on)
{
	int rc = 0;
	unsigned gpio;

	gpio = GPIO_HOST_VBUS_EN;
	rc = gpio_request(gpio, "i2c_host_vbus_en");
	if (rc < 0) {
		pr_err("failed to request %d GPIO\n", gpio);
		return;
	}
	gpio_direction_output(gpio, !!on);
	gpio_set_value_cansleep(gpio, !!on);
	gpio_free(gpio);
}

static struct msm_usb_host_platform_data msm_usb_host_pdata = {
	.phy_info = (USB_PHY_INTEGRATED | USB_PHY_MODEL_45NM),
};

static void __init msm7x2x_init_host(void)
{
	msm_add_host(0, &msm_usb_host_pdata);
}
#endif

#ifdef CONFIG_USB_MSM_OTG_72K
/* Open/close the HSUSB RPC channel to the modem. */
static int hsusb_rpc_connect(int connect)
{
	if (connect)
		return msm_hsusb_rpc_connect();
	else
		return msm_hsusb_rpc_close();
}

static struct regulator *reg_hsusb;

/* init != 0: acquire the "usb" regulator and program it to 3.3 V.
 * init == 0: fall through to release it and NULL the handle. */
static int msm_hsusb_ldo_init(int init)
{
	int rc = 0;

	if (init) {
		reg_hsusb = regulator_get(NULL, "usb");
		if (IS_ERR(reg_hsusb)) {
			rc = PTR_ERR(reg_hsusb);
			pr_err("%s: sandeep could not get regulator: %d\n", __func__, rc);
			goto out;
		}
		rc = regulator_set_voltage(reg_hsusb, 3300000, 3300000);
		if (rc) {
			pr_err("%s:sandeep could not set voltage: %d\n", __func__, rc);
			goto reg_free;
		}
		return 0;
	}
	/* else fall through */
reg_free:
	regulator_put(reg_hsusb);
out:
	reg_hsusb = NULL;
	return rc;
}

/* Enable/disable the HSUSB LDO; the static ldo_status keeps the
 * enable/disable calls balanced (body continues on the next line). */
static int msm_hsusb_ldo_enable(int enable)
{
	static int ldo_status;

	if
	/* Tail of msm_hsusb_ldo_enable(). */
	(IS_ERR_OR_NULL(reg_hsusb))
		return reg_hsusb ? PTR_ERR(reg_hsusb) : -ENODEV;

	if (ldo_status == enable)
		return 0;

	ldo_status = enable;

	return enable ? regulator_enable(reg_hsusb) : regulator_disable(reg_hsusb);
}

#ifndef CONFIG_USB_EHCI_MSM_72K
/* Register/unregister the PMIC VBUS online-notification callback via RPC. */
static int msm_hsusb_pmic_notif_init(void (*callback)(int online), int init)
{
	int ret = 0;

	if (init)
		ret = msm_pm_app_rpc_init(callback);
	else
		msm_pm_app_rpc_deinit(callback);
	return ret;
}
#endif

#if defined(CONFIG_MACH_AMAZING_CDMA)
/* Report whether we booted in LPM (charging) mode.
 * NOTE(review): K&R empty parameter list -- should be checkChgMode(void). */
static int checkChgMode()
{
	return charging_boot;
}
#endif

static struct msm_otg_platform_data msm_otg_pdata = {
#ifndef CONFIG_USB_EHCI_MSM_72K
	.pmic_vbus_notif_init = msm_hsusb_pmic_notif_init,
#else
	.vbus_power = msm_hsusb_vbus_power,
#endif
	.rpc_connect = hsusb_rpc_connect,
	.pemp_level = PRE_EMPHASIS_WITH_20_PERCENT,
	.cdr_autoreset = CDR_AUTO_RESET_DISABLE,
	.drv_ampl = HS_DRV_AMPLITUDE_75_PERCENT,
	.se1_gating = SE1_GATING_DISABLE,
	.ldo_init = msm_hsusb_ldo_init,
	.ldo_enable = msm_hsusb_ldo_enable,
	.chg_init = hsusb_chg_init,
	/* check charger cable type for USB phy off */
	// .chg_connect_type = checkChargerType,
	/* XXX: block charger current setting */
#if defined(CONFIG_MACH_AMAZING_CDMA)
	.chg_mode_check = checkChgMode,
#endif
#if !defined(CONFIG_MACH_AMAZING_CDMA)
	.chg_connected = hsusb_chg_connected,
	.chg_vbus_draw = hsusb_chg_vbus_draw,
#endif
};
#endif

static struct msm_hsusb_gadget_platform_data msm_gadget_pdata = {
	.is_phy_status_timer_on = 1,
};

/* SMC91x Ethernet: fixed MMIO window plus GPIO 4 as the interrupt. */
static struct resource smc91x_resources[] = {
	[0] = {
		.start = 0x90000300,
		.end = 0x900003ff,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = MSM_GPIO_TO_INT(4),
		.end = MSM_GPIO_TO_INT(4),
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device smc91x_device = {
	.name = "smc91x",
	.id = 0,
	.num_resources = ARRAY_SIZE(smc91x_resources),
	.resource = smc91x_resources,
};

#define WLAN_HOST_WAKE
#ifdef WLAN_HOST_WAKE
/* WLAN host-wake bookkeeping: the wake GPIO/IRQ and a wakelock held
 * briefly after each wake interrupt (declaration continues below). */
struct wlansleep_info {
	unsigned host_wake;
	unsigned host_wake_irq;
	struct wake_lock wake_lock;
};

static struct wlansleep_info
*wsi; static struct tasklet_struct hostwake_task; static void wlan_hostwake_task(unsigned long data) { printk(KERN_INFO "WLAN: wake lock timeout 0.5 sec...\n"); wake_lock_timeout(&wsi->wake_lock, HZ / 2); } static irqreturn_t wlan_hostwake_isr(int irq, void *dev_id) { //please fix gpio_clear_detect_status(wsi->host_wake_irq); /* schedule a tasklet to handle the change in the host wake line */ tasklet_schedule(&hostwake_task); return IRQ_HANDLED; } static int wlan_host_wake_init(void) { int ret; wsi = kzalloc(sizeof(struct wlansleep_info), GFP_KERNEL); if (!wsi) return -ENOMEM; wake_lock_init(&wsi->wake_lock, WAKE_LOCK_SUSPEND, "bluesleep"); tasklet_init(&hostwake_task, wlan_hostwake_task, 0); wsi->host_wake = GPIO_WLAN_HOST_WAKE; wsi->host_wake_irq = MSM_GPIO_TO_INT(wsi->host_wake); gpio_tlmm_config(GPIO_CFG(GPIO_WLAN_HOST_WAKE, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG_ENABLE); //please fix gpio_configure(wsi->host_wake, GPIOF_INPUT); ret = request_irq(wsi->host_wake_irq, wlan_hostwake_isr, IRQF_DISABLED | IRQF_TRIGGER_RISING, "wlan hostwake", NULL); if (ret < 0) { printk(KERN_ERR "WLAN: Couldn't acquire WLAN_HOST_WAKE IRQ"); return -1; } ret = enable_irq_wake(wsi->host_wake_irq); if (ret < 0) { printk(KERN_ERR "WLAN: Couldn't enable WLAN_HOST_WAKE as wakeup interrupt"); free_irq(wsi->host_wake_irq, NULL); return -1; } return 0; } static void wlan_host_wake_exit(void) { if (disable_irq_wake(wsi->host_wake_irq)) printk(KERN_ERR "WLAN: Couldn't disable hostwake IRQ wakeup mode \n"); free_irq(wsi->host_wake_irq, NULL); wake_lock_destroy(&wsi->wake_lock); kfree(wsi); } #endif /* WLAN_HOST_WAKE */ static int wlan_set_gpio(unsigned gpio, int on) { int rc = 0; int gpio_value = 0; printk("%s - %d : %s\n", __func__, gpio, on ? 
	       "on" : "off");

	// Request
	if (gpio_request(gpio, "wlan_ar6000_pm")) {
		printk(KERN_ERR "%s: gpio_request for %d failed\n", __func__, gpio);
		return -1;
	}
	gpio_value = gpio_get_value(gpio);
	printk(KERN_INFO "%s: before (%d) :: gpio_get_value = %d", __func__, on, gpio_value);

	// Direction Output On/Off
	rc = gpio_direction_output(gpio, on);
	gpio_free(gpio);
	gpio_value = gpio_get_value(gpio);
	printk(KERN_INFO "%s: after (%d) :: gpio_get_value = %d", __func__, on, gpio_value);
	if (rc) {
		printk(KERN_ERR "%s: gpio_direction_output for %d failed\n", __func__, gpio);
		return -1;
	}
	return 0;
}

#ifdef ENABLE_WLAN_LDO
/* Switch the 2.8 V WLAN regulator on (on != 0) or off (on == 0, which
 * jumps straight to reg_disable). Returns 0 on success, WLAN_ERROR if
 * the regulator handle cannot be obtained.
 * NOTE(review): the second IS_ERR_OR_NULL check and the `on ?` ternaries
 * are redundant on the on-path, but kept -- behavior is unchanged. */
int wlan_enable_ldo_33v(int on)
{
	int rc = 0, min_level = 2800000, max_level = 2800000;

	printk(KERN_INFO "%s %s --enter\n", __func__, on ? "on" : "down");

	if (!on) {
		goto reg_disable;
	}

	wlan_reg = regulator_get(NULL, "wlan");
	if (IS_ERR_OR_NULL(wlan_reg)) {
		rc = PTR_ERR(wlan_reg);
		dev_err(NULL, "%s: could not get regulator %s: %d\n", __func__, "wlan", rc);
		goto reg_get_fail;
	}
	if (IS_ERR_OR_NULL(wlan_reg)) {
		rc = wlan_reg ? PTR_ERR(wlan_reg) : -ENODEV;
		dev_err(NULL, "%s: invalid regulator handle for %s: %d\n", __func__, "wlan", rc);
		goto reg_disable;
	}
	rc = on ? regulator_set_voltage(wlan_reg, min_level, max_level) : 0;
	if (rc) {
		dev_err(NULL, "%s: could not set voltage for %s: %d\n", __func__, "wlan", rc);
		goto reg_disable;
	}
	rc = on ? regulator_enable(wlan_reg) : 0;
	if (rc) {
		dev_err(NULL, "%s: could not %sable regulator %s: %d\n", __func__, "en", "wlan", rc);
		goto reg_disable;
	}
	return rc;

reg_get_fail:
	printk(KERN_ERR "%s %s --regulator get-fail\n", __func__, on ? "on" : "down");
	regulator_put(wlan_reg);
	wlan_reg = NULL;
	return WLAN_ERROR;

	/* Release path (printk continues on the next line). */
reg_disable:
	printk(KERN_INFO "%s %s --reg_disable\n", __func__, on ?
	       "on" : "down");
	if (!IS_ERR_OR_NULL(wlan_reg)) {
		regulator_disable(wlan_reg);
		regulator_put(wlan_reg);
		wlan_reg = NULL;
	}
	return rc;
}
#endif

#ifdef WLAN_33V_CONTROL_FOR_BT_ANTENNA
/* Reference-count GPIO_WLAN_33V_EN between BT and WiFi: the rail stays up
 * while either owner's flag bit is set, so only 0 <-> nonzero transitions
 * actually toggle the GPIO. Returns WLAN_OK/WLAN_ERROR. */
int wlan_setup_ldo_33v(int input_flag, int on)
{
	int skip = 0;
	int temp_flag = wlan_33v_flag;

	printk(KERN_INFO "%s - set by %s : %s\n", __func__,
	       (input_flag == WLAN_33V_WIFI_FLAG) ? "Wifi" : "BT", on ? "on" : "off");
	printk(KERN_INFO "%s - old wlan_33v_flag : %d\n", __func__, temp_flag);

	if (on) {
		if (temp_flag) /* Already On */
			skip = 1;
		temp_flag |= input_flag;
	} else {
		temp_flag &= (~input_flag);
		/* Keep GPIO_WLAN_33V_EN on if either BT or Wifi is turned on*/
		if (temp_flag)
			skip = 1;
	}

	printk(KERN_INFO "%s - new wlan_33v_flag : %d\n", __func__, temp_flag);

	if (skip) {
		printk(KERN_INFO "%s - Skip GPIO_WLAN_33V_EN %s\n", __func__, on ? "on" : "off");
	} else {
		/* GPIO_WLAN_33V_EN - On / Off */
		if (wlan_set_gpio(GPIO_WLAN_33V_EN, on))
			return WLAN_ERROR;
	}

	wlan_33v_flag = temp_flag;
	return WLAN_OK;
}
#endif

/* Power the AR6000 up or down: LDO, reset GPIO and host-wake IRQ in
 * hardware-mandated order, then optionally fake SDIO card (re)detection
 * (body continues on the next line). */
void wlan_setup_power(int on, int detect)
{
	printk("%s %s --enter\n", __func__, on ?
	       "on" : "down");

	if (on) {
#ifdef ENABLE_WLAN_LDO
		if (wlan_enable_ldo_33v(on))
			return;
#endif
		udelay(60);
		// GPIO_WLAN_RESET_N - On
		if (wlan_set_gpio(GPIO_WLAN_RESET_N, 1))
			return;
#ifdef WLAN_HOST_WAKE
		wlan_host_wake_init();
#endif /* WLAN_HOST_WAKE */
	} else {
#ifdef WLAN_HOST_WAKE
		wlan_host_wake_exit();
#endif /* WLAN_HOST_WAKE */
		// GPIO_WLAN_RESET_N - Off
		if (wlan_set_gpio(GPIO_WLAN_RESET_N, 0))
			return;
		udelay(60);
#ifdef ENABLE_WLAN_LDO
		if (wlan_enable_ldo_33v(on))
			return;
#endif
	}

#ifndef ATH_POLLING
	mdelay(100);
	if (detect) {
		/* Detect card */
		if (wlan_status_notify_cb)
			wlan_status_notify_cb(on, wlan_devid);
		else
			printk(KERN_ERR "WLAN: No notify available\n");
	}
#endif /* ATH_POLLING */
}
EXPORT_SYMBOL(wlan_setup_power);
EXPORT_SYMBOL(board_hw_revision);

/* Mux the 3.3 V WLAN enable GPIO; returns WLAN_OK or WLAN_ERROR. */
static int wlan_power_init(void)
{
#ifdef WLAN_33V_CONTROL_FOR_BT_ANTENNA
	wlan_33v_flag = 0;
#endif
	/* Set config - GPIO_WLAN_33V_EN */
	if (gpio_tlmm_config(GPIO_CFG(GPIO_WLAN_33V_EN, 0, GPIO_CFG_OUTPUT,
			GPIO_CFG_PULL_UP, GPIO_CFG_2MA), GPIO_CFG_ENABLE)) {
		printk(KERN_ERR "%s: gpio_tlmm_config for %d failed\n", __func__, GPIO_WLAN_33V_EN);
		return WLAN_ERROR;
	}
	return WLAN_OK;
}

#if (defined(CONFIG_MMC_MSM_SDC1_SUPPORT)\
	|| defined(CONFIG_MMC_MSM_SDC2_SUPPORT)\
	|| defined(CONFIG_MMC_MSM_SDC3_SUPPORT)\
	|| defined(CONFIG_MMC_MSM_SDC4_SUPPORT))

/* Per-slot GPIO / regulator state bits for the SDCC controllers. */
static unsigned long vreg_sts, gpio_sts;
static struct vreg *vreg_mmc;
static struct vreg *vreg_emmc;

struct sdcc_vreg {
	struct vreg *vreg_data;
	unsigned level;
};

static struct sdcc_vreg sdcc_vreg_data[4];

struct sdcc_gpio {
	struct msm_gpio *cfg_data;
	uint32_t size;
	struct msm_gpio *sleep_cfg_data;
};

/**
 * Due to insufficient drive strengths for SDC GPIO lines some old versioned
 * SD/MMC cards may cause data CRC errors. Hence, set optimal values
 * for SDC slots based on timing closure and marginality. SDC1 slot
 * require higher value since it should handle bad signal quality due
 * to size of T-flash adapters.
 */
/* SDC1 (external microSD): highest drive strength, see comment above. */
static struct msm_gpio sdc1_cfg_data[] = {
	{GPIO_CFG(51, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_14MA), "sdc1_dat_3"},
	{GPIO_CFG(52, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_14MA), "sdc1_dat_2"},
	{GPIO_CFG(53, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_14MA), "sdc1_dat_1"},
	{GPIO_CFG(54, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_14MA), "sdc1_dat_0"},
	{GPIO_CFG(55, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_14MA), "sdc1_cmd"},
	{GPIO_CFG(56, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_14MA), "sdc1_clk"},
};

/* SDC2 (WLAN SDIO): active pin configuration. */
static struct msm_gpio sdc2_cfg_data[] = {
	{GPIO_CFG(62, 2, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), "sdc2_clk"},
	{GPIO_CFG(63, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc2_cmd"},
	{GPIO_CFG(64, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc2_dat_3"},
	{GPIO_CFG(65, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc2_dat_2"},
	{GPIO_CFG(66, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc2_dat_1"},
	{GPIO_CFG(67, 2, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc2_dat_0"},
};

/* SDC2 low-power configuration applied when the slot is turned off. */
static struct msm_gpio sdc2_sleep_cfg_data[] = {
	{GPIO_CFG(62, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "sdc2_clk"},
	{GPIO_CFG(63, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA), "sdc2_cmd"},
	{GPIO_CFG(64, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA), "sdc2_dat_3"},
	{GPIO_CFG(65, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA), "sdc2_dat_2"},
	{GPIO_CFG(66, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA), "sdc2_dat_1"},
	{GPIO_CFG(67, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA), "sdc2_dat_0"},
};

/* SDC3 pin configuration -- currently unreferenced: the sdcc_cfg_data[]
 * entry for slot 3 is commented out below (table continues next line). */
static struct msm_gpio sdc3_cfg_data[] = {
	{GPIO_CFG(88, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), "sdc3_clk"},
	{GPIO_CFG(89, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc3_cmd"},
	{GPIO_CFG(90, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc3_dat_3"},
	{GPIO_CFG(91, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc3_dat_2"},
	{GPIO_CFG(92,
		 /* Remaining sdc3 entries (table opens on the previous line). */
		 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc3_dat_1"},
	{GPIO_CFG(93, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc3_dat_0"},
#ifdef CONFIG_MMC_MSM_SDC3_8_BIT_SUPPORT
	{GPIO_CFG(19, 3, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc3_dat_7"},
	{GPIO_CFG(20, 3, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc3_dat_6"},
	{GPIO_CFG(21, 3, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc3_dat_5"},
	{GPIO_CFG(108, 3, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc3_dat_4"},
#endif
};

static struct msm_gpio sdc4_cfg_data[] = {
	{GPIO_CFG(19, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc4_dat_3"},
	{GPIO_CFG(20, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc4_dat_2"},
	{GPIO_CFG(21, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc4_dat_1"},
	{GPIO_CFG(106, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc4_cmd"},
	{GPIO_CFG(108, 1, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_10MA), "sdc4_dat_0"},
	{GPIO_CFG(109, 1, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), "sdc4_clk"},
};

/* Per-slot table indexed by (dev_id - 1); slot 3 intentionally empty. */
static struct sdcc_gpio sdcc_cfg_data[] = {
	{
		.cfg_data = sdc1_cfg_data,
		.size = ARRAY_SIZE(sdc1_cfg_data),
	},
	{
		.cfg_data = sdc2_cfg_data,
		.size = ARRAY_SIZE(sdc2_cfg_data),
		.sleep_cfg_data = sdc2_sleep_cfg_data,
	},
	{
		/* .cfg_data = sdc3_cfg_data, */
		/* .size = ARRAY_SIZE(sdc3_cfg_data), */
	},
	{
		.cfg_data = sdc4_cfg_data,
		.size = ARRAY_SIZE(sdc4_cfg_data),
	},
};

/* Enable/disable a slot's pins; gpio_sts keeps the calls idempotent.
 * On disable, a slot with sleep config gets that applied instead
 * (function continues on the next line). */
static int msm_sdcc_setup_gpio(int dev_id, unsigned int enable)
{
	int rc = 0;
	struct sdcc_gpio *curr;

	curr = &sdcc_cfg_data[dev_id - 1];
	if (!(test_bit(dev_id, &gpio_sts)^enable))
		return rc;

	if (enable) {
		set_bit(dev_id, &gpio_sts);
		rc = msm_gpios_request_enable(curr->cfg_data, curr->size);
		if (rc)
			pr_err("%s: Failed to turn on GPIOs for slot %d\n", __func__, dev_id);
	} else {
		clear_bit(dev_id, &gpio_sts);
		if (curr->sleep_cfg_data) {
			rc = msm_gpios_enable(curr->sleep_cfg_data, curr->size);
			msm_gpios_free(curr->sleep_cfg_data, curr->size);
			return rc;
		}
		msm_gpios_disable_free(curr->cfg_data, curr->size);
	}
	return rc;
}

/* Set/clear a slot's regulator; vreg_sts keeps the calls idempotent. */
static int msm_sdcc_setup_vreg(int dev_id, unsigned int enable)
{
	int rc = 0;
	struct sdcc_vreg *curr;

	curr = &sdcc_vreg_data[dev_id - 1];
	printk("%s : %d : %d : level : %d\n", __func__, dev_id, enable, curr->level);
	if (!(test_bit(dev_id, &vreg_sts)^enable))
		return rc;

	if (enable) {
		set_bit(dev_id, &vreg_sts);
		rc = vreg_set_level(curr->vreg_data, curr->level);
		if (rc)
			pr_err("%s: vreg_set_level() = %d\n", __func__, rc);
		rc = vreg_enable(curr->vreg_data);
		if (rc)
			pr_err("%s: vreg_enable() = %d\n", __func__, rc);
	} else {
		clear_bit(dev_id, &vreg_sts);
		rc = vreg_disable(curr->vreg_data);
		if (rc)
			pr_err("%s: vreg_disable() = %d\n", __func__, rc);
	}
	return rc;
}

/* MMC core power hook: GPIOs first, then the regulator. */
static uint32_t msm_sdcc_setup_power(struct device *dv, unsigned int vdd)
{
	int rc = 0;
	struct platform_device *pdev;

	pdev = container_of(dv, struct platform_device, dev);
	rc = msm_sdcc_setup_gpio(pdev->id, !!vdd);
	if (rc)
		goto out;
	rc = msm_sdcc_setup_vreg(pdev->id, !!vdd);
out:
	return rc;
}

#define GPIO_SDC1_HW_DET 94

#if defined(CONFIG_MMC_MSM_SDC1_SUPPORT) \
	&& defined(CONFIG_MMC_MSM_CARD_HW_DETECTION)
/* Sample the card-detect GPIO; the pin is active-low, hence the final
 * inversion (card present -> 1). */
static unsigned int msm7x2xa_sdcc_slot_status(struct device *dev)
{
	int status;

	printk("%s entered\n", __func__);
	status = gpio_tlmm_config(GPIO_CFG(GPIO_SDC1_HW_DET, 2, GPIO_CFG_INPUT,
			GPIO_CFG_PULL_UP, GPIO_CFG_8MA), GPIO_CFG_ENABLE);
	if (status)
		pr_err("%s:Failed to configure tlmm for GPIO %d\n", __func__, GPIO_SDC1_HW_DET);

	status = gpio_request(GPIO_SDC1_HW_DET, "SD_HW_Detect");
	if (status) {
		pr_err("%s:Failed to request GPIO %d\n", __func__, GPIO_SDC1_HW_DET);
	} else {
		status = gpio_direction_input(GPIO_SDC1_HW_DET);
		if (!status)
			status = gpio_get_value(GPIO_SDC1_HW_DET);
		gpio_free(GPIO_SDC1_HW_DET);
	}
	status = status?0:1 ; //PMMC
	printk("<=PMMC=> %s : status : %d \n", __func__, status);
	return status;
}
#endif

#ifdef CONFIG_MMC_MSM_SDC1_SUPPORT
/* SDC1 (microSD) platform data (continues on the next line). */
static struct mmc_platform_data sdc1_plat_data = {
	.ocr_mask = MMC_VDD_28_29,
	.translate_vdd
	/* Tail of sdc1_plat_data (struct opens on the previous line). */
	= msm_sdcc_setup_power,
	.mmc_bus_width = MMC_CAP_4_BIT_DATA,
	.msmsdcc_fmin = 144000,
	.msmsdcc_fmid = 24576000,
	.msmsdcc_fmax = 49152000,
#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
	.status = msm7x2xa_sdcc_slot_status,
	.status_irq = MSM_GPIO_TO_INT(GPIO_SDC1_HW_DET),
	.irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
#endif
};
#endif

#ifdef CONFIG_MMC_MSM_SDC2_SUPPORT
/* SDC2 carries the AR6000 WLAN; status callbacks let wlan_setup_power()
 * fake card insertion/removal. */
static struct mmc_platform_data sdc2_plat_data = {
	/*
	 * SDC2 supports only 1.8V, claim for 2.85V range is just
	 * for allowing buggy cards who advertise 2.8V even though
	 * they can operate at 1.8V supply.
	 */
	.ocr_mask = MMC_VDD_28_29 | MMC_VDD_165_195,
	.translate_vdd = msm_sdcc_setup_power,
	.mmc_bus_width = MMC_CAP_4_BIT_DATA,
#if 0 /* def CONFIG_MMC_MSM_SDIO_SUPPORT */
	.sdiowakeup_irq = MSM_GPIO_TO_INT(66),
#endif
#ifndef ATH_POLLING
	.status = wlan_status,
	.register_status_notify = register_wlan_status_notify,
#endif /* ATH_POLLING */
	.msmsdcc_fmin = 144000,
	.msmsdcc_fmid = 24576000,
	.msmsdcc_fmax = 49152000, //24576000, ///*144000,//*/
#ifdef CONFIG_MMC_MSM_SDC2_DUMMY52_REQUIRED
	.dummy52_required = 1,
#endif
};
#endif

#ifdef CONFIG_MMC_MSM_SDC3_SUPPORT
/* SDC3: non-removable (eMMC-style) slot. */
static struct mmc_platform_data sdc3_plat_data = {
	.ocr_mask = MMC_VDD_28_29,
	.translate_vdd = msm_sdcc_setup_power,
#ifdef CONFIG_MMC_MSM_SDC3_8_BIT_SUPPORT
	.mmc_bus_width = MMC_CAP_8_BIT_DATA,
#else
	.mmc_bus_width = MMC_CAP_4_BIT_DATA,
#endif
	.msmsdcc_fmin = 144000,
	.msmsdcc_fmid = 24576000,
	.msmsdcc_fmax = 49152000,
	.nonremovable = 1,
};
#endif

#if (defined(CONFIG_MMC_MSM_SDC4_SUPPORT)\
	&& !defined(CONFIG_MMC_MSM_SDC3_8_BIT_SUPPORT))
/* SDC4: only usable when SDC3 is not in 8-bit mode (shared pins 19-21/108). */
static struct mmc_platform_data sdc4_plat_data = {
	.ocr_mask = MMC_VDD_28_29,
	.translate_vdd = msm_sdcc_setup_power,
	.mmc_bus_width = MMC_CAP_4_BIT_DATA,
	.msmsdcc_fmin = 144000,
	.msmsdcc_fmid = 24576000,
	.msmsdcc_fmax = 49152000,
};
#endif
#endif

#ifdef CONFIG_SERIAL_MSM_HS
/* BT UART: inject 0xFD on wakeup so the stack sees a wake byte. */
static struct msm_serial_hs_platform_data msm_uart_dm1_pdata = {
	.inject_rx_on_wakeup = 1,
	.rx_to_inject = 0xFD,
};
#endif

/* Low-power-mode table (declaration continues on the next line). */
static struct
msm_pm_platform_data msm7x27a_pm_data[MSM_PM_SLEEP_MODE_NR] = {
	/* Latency/residency are in microseconds per sleep mode. */
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = {
		.idle_supported = 1,
		.suspend_supported = 1,
		.idle_enabled = 1,
		.suspend_enabled = 1,
		.latency = 16000,
		.residency = 20000,
	},
	[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN] = {
		.idle_supported = 1,
		.suspend_supported = 1,
		.idle_enabled = 1,
		.suspend_enabled = 1,
		.latency = 12000,
		.residency = 20000,
	},
	[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT] = {
		.idle_supported = 1,
		.suspend_supported = 1,
		.idle_enabled = 0,
		.suspend_enabled = 1,
		.latency = 2000,
		.residency = 0,
	},
	[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = {
		.idle_supported = 1,
		.suspend_supported = 1,
		.idle_enabled = 1,
		.suspend_enabled = 1,
		.latency = 2,
		.residency = 0,
	},
};

/* Look up the wakeup latency of a sleep mode; unknown modes return 0. */
u32 msm7627a_power_collapse_latency(enum msm_pm_sleep_mode mode)
{
	switch (mode) {
	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
		return msm7x27a_pm_data
		[MSM_PM_SLEEP_MODE_POWER_COLLAPSE].latency;
	case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN:
		return msm7x27a_pm_data
		[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].latency;
	case MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT:
		return msm7x27a_pm_data
		[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].latency;
	case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
		return msm7x27a_pm_data
		[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].latency;
	default:
		return 0;
	}
}

static struct msm_pm_boot_platform_data msm_pm_boot_pdata __initdata = {
	.mode = MSM_PM_BOOT_CONFIG_RESET_VECTOR_PHYS,
	.p_addr = 0,
};

static struct android_pmem_platform_data android_pmem_adsp_pdata = {
	.name = "pmem_adsp",
	.allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
	.cached = 1,
	.memory_type = MEMTYPE_EBI1,
};

static struct platform_device android_pmem_adsp_device = {
	.name = "android_pmem",
	.id = 1,
	.dev = { .platform_data = &android_pmem_adsp_pdata },
};

/* "pmem_mdp_size=" kernel parameter overrides the compile-time default. */
static unsigned pmem_mdp_size = MSM_PMEM_MDP_SIZE;
static int __init pmem_mdp_size_setup(char *p)
{
	pmem_mdp_size = memparse(p, NULL);
	return 0;
}
early_param("pmem_mdp_size", pmem_mdp_size_setup);

/* "pmem_adsp_size=" kernel parameter overrides the compile-time default. */
static unsigned pmem_adsp_size = MSM_PMEM_ADSP_SIZE;
static int __init pmem_adsp_size_setup(char *p)
{
	pmem_adsp_size = memparse(p, NULL);
	return 0;
}
early_param("pmem_adsp_size", pmem_adsp_size_setup);

/* "fb_size=" kernel parameter overrides the compile-time default. */
static unsigned fb_size = MSM_FB_SIZE;
static int __init fb_size_setup(char *p)
{
	fb_size = memparse(p, NULL);
	return 0;
}
early_param("fb_size", fb_size_setup);

/* LCDC regulators and their voltages (mV), index-matched arrays. */
static const char * const msm_fb_lcdc_vreg[] = {
	"gp2",
	"msme1",
};

static const int msm_fb_lcdc_vreg_mV[] = {
	2850,
	1800,
};

#define LCDC_CONFIG_PROC 21
#define LCDC_UN_CONFIG_PROC 22
#define LCDC_API_PROG 0x30000066
#define LCDC_API_VERS 0x00010001

#define GPIO_SPI_CLK 30
#define GPIO_SPI_CS 26
#define GPIO_SPI_SDI 57
#define GPIO_SPI_SDO 23
#define GPIO_LCD_RESET_N 22
#define GPIO_LCD_DETECT 38

struct vreg *lcdc_vreg[ARRAY_SIZE(msm_fb_lcdc_vreg)];

#if 0 // toshiba panel
/* Dead code: Toshiba-panel GPIO bring-up kept for reference. */
static uint32_t lcdc_gpio_initialized;

static void lcdc_toshiba_gpio_init(void)
{
	int i, rc = 0;
	if (!lcdc_gpio_initialized) {
		if (gpio_request(GPIO_SPI_CLK, "spi_clk")) {
			pr_err("failed to request gpio spi_clk\n");
			return;
		}
		if (gpio_request(GPIO_SPI_CS0_N, "spi_cs")) {
			pr_err("failed to request gpio spi_cs0_N\n");
			goto fail_gpio6;
		}
		if (gpio_request(GPIO_SPI_MOSI, "spi_mosi")) {
			pr_err("failed to request gpio spi_mosi\n");
			goto fail_gpio5;
		}
		if (gpio_request(GPIO_SPI_MISO, "spi_miso")) {
			pr_err("failed to request gpio spi_miso\n");
			goto fail_gpio4;
		}
		if (gpio_request(GPIO_DISPLAY_PWR_EN, "gpio_disp_pwr")) {
			pr_err("failed to request gpio_disp_pwr\n");
			goto fail_gpio3;
		}
		if (gpio_request(GPIO_BACKLIGHT_EN, "gpio_bkl_en")) {
			pr_err("failed to request gpio_bkl_en\n");
			goto fail_gpio2;
		}
		pmapp_disp_backlight_init();
		for (i = 0; i < ARRAY_SIZE(msm_fb_lcdc_vreg); i++) {
			lcdc_vreg[i] = vreg_get(0, msm_fb_lcdc_vreg[i]);
			rc = vreg_set_level(lcdc_vreg[i], msm_fb_lcdc_vreg_mV[i]);
			if (rc < 0) {
				pr_err("%s: set regulator level failed " "with :(%d)\n", __func__, rc);
				goto fail_gpio1;
			}
		}
lcdc_gpio_initialized = 1; } return; fail_gpio1: for (; i > 0; i--) vreg_put(lcdc_vreg[i - 1]); gpio_free(GPIO_BACKLIGHT_EN); fail_gpio2: gpio_free(GPIO_DISPLAY_PWR_EN); fail_gpio3: gpio_free(GPIO_SPI_MISO); fail_gpio4: gpio_free(GPIO_SPI_MOSI); fail_gpio5: gpio_free(GPIO_SPI_CS0_N); fail_gpio6: gpio_free(GPIO_SPI_CLK); lcdc_gpio_initialized = 0; } static uint32_t lcdc_gpio_table[] = { GPIO_SPI_CLK, GPIO_SPI_CS0_N, GPIO_SPI_MOSI, GPIO_DISPLAY_PWR_EN, GPIO_BACKLIGHT_EN, GPIO_SPI_MISO, }; static void config_lcdc_gpio_table(uint32_t *table, int len, unsigned enable) { int n; if (lcdc_gpio_initialized) { /* All are IO Expander GPIOs */ for (n = 0; n < (len - 1); n++) gpio_direction_output(table[n], 1); } } static void lcdc_toshiba_config_gpios(int enable) { config_lcdc_gpio_table(lcdc_gpio_table, ARRAY_SIZE(lcdc_gpio_table), enable); } #endif static int lcdc_gpio_num[] = { GPIO_SPI_CLK, GPIO_SPI_CS, GPIO_SPI_SDI, GPIO_LCD_RESET_N, GPIO_LCD_DETECT, GPIO_SPI_SDO, }; static uint32_t lcdc_amazing_gpio_init_table[] = { GPIO_CFG(GPIO_LCD_MCLK, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_DETECT, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_VSYNC, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_HSYNC, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_R_0, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_R_1, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_R_2, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_R_3, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCDR_4, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_R_5, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_G_0, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_G_1, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCDE_G_2, 0, 
GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_G_3, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_G_4, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_G_5, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_B_0, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_B_1, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_B_2, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_B_3, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_B_4, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_B_5, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA), }; static void lcdc_trebon_gpio_init(void) { int rc, n; if (gpio_request(GPIO_SPI_CLK, "spi_clk")) { pr_err("failed to request gpio spi_clk\n"); } if (gpio_request(GPIO_SPI_CS, "spi_cs")) { pr_err("failed to request gpio spi_cs\n"); } if (gpio_request(GPIO_SPI_SDI, "spi_mosi")) { pr_err("failed to request gpio spi_sdi\n"); } if (gpio_request(GPIO_LCD_RESET_N, "gpio_lcd_reset_n")) { pr_err("failed to request gpio lcd_reset_n\n"); } if (gpio_request(GPIO_SPI_SDO, "gpio_spi_sdo")) { pr_err("failed to request gpio spi_sdo\n"); } for (n = 0; n < sizeof(lcdc_amazing_gpio_init_table) / sizeof(uint32_t); n++) { rc = gpio_tlmm_config(lcdc_amazing_gpio_init_table[n], GPIO_CFG_ENABLE); if (rc) { printk(KERN_ERR "%s: lcdc_amazing_config(%#x)=%d\n", __func__, lcdc_amazing_gpio_init_table[n], rc); break; } } rc = gpio_request(GPIO_LCD_DETECT, "gpio_lcd_detect"); if (rc) { pr_err("%s: unable to request gpio %d\n", __func__, GPIO_LCD_DETECT); } rc = gpio_direction_input(GPIO_LCD_DETECT); if (rc < 0) { pr_err("%s: unable to set the direction of gpio %d\n", __func__, GPIO_LCD_DETECT); } return; } static uint32_t lcdc_gpio_table[] = { GPIO_CFG(GPIO_SPI_CLK, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(GPIO_SPI_CS, 0, 
GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(GPIO_SPI_SDI, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(GPIO_LCD_RESET_N, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG(GPIO_SPI_SDO, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), }; static void config_lcdc_gpio_table(uint32_t *table, int len, unsigned enable) { int n, rc; for (n = 0; n < len; n++) { rc = gpio_tlmm_config(table[n], enable ? GPIO_CFG_ENABLE : GPIO_CFG_DISABLE); if (rc) { printk(KERN_ERR "%s: gpio_tlmm_config(%#x)=%d\n", __func__, table[n], rc); break; } } } static int msm_fb_lcdc_power_save(int on) { #if 0 /* struct vreg *vreg[ARRAY_SIZE(msm_fb_lcdc_vreg)]; */ int rc = 0; /* Doing the init of the LCDC GPIOs very late as they are from an I2C-controlled IO Expander */ lcdc_toshiba_gpio_init(); if (lcdc_gpio_initialized) { gpio_set_value_cansleep(GPIO_DISPLAY_PWR_EN, on); gpio_set_value_cansleep(GPIO_BACKLIGHT_EN, on); } pmapp_disp_backlight_init(); rc = pmapp_disp_backlight_set_brightness(100); #endif return 0; } static struct lcdc_platform_data lcdc_pdata = { .lcdc_gpio_config = NULL, .lcdc_power_save = msm_fb_lcdc_power_save, }; static struct resource lcdc_trebon_resources[] = { { .name = "lcd_breakdown_det", .start = MSM_GPIO_TO_INT(GPIO_LCD_DETECT), .end = MSM_GPIO_TO_INT(GPIO_LCD_DETECT), .flags = IORESOURCE_IRQ, } }; static void lcdc_trebon_config_gpios(int enable) { config_lcdc_gpio_table(lcdc_gpio_table, ARRAY_SIZE(lcdc_gpio_table), enable); } static struct msm_panel_common_pdata lcdc_trebon_panel_data = { .panel_config_gpio = lcdc_trebon_config_gpios, .gpio_num = lcdc_gpio_num, }; static struct platform_device lcdc_trebon_panel_device = { #if defined(CONFIG_FB_MSM_LCDC_TREBON_HVGA) .name = "lcdc_trebon_hvga", .num_resources = ARRAY_SIZE(lcdc_trebon_resources), .resource = lcdc_trebon_resources, #else .name = "lcdc_s6d16a0x_hvga", #endif .id = 0, .dev = { .platform_data = &lcdc_trebon_panel_data, } }; #ifdef CONFIG_TOUCHSCREEN_MELFAS_TS static 
struct tsp_callbacks * tsp_charger_callbacks; static void vital2_inform_charger_connection(int mode) { if (tsp_charger_callbacks && tsp_charger_callbacks->inform_charger) tsp_charger_callbacks->inform_charger(tsp_charger_callbacks, mode); }; static void register_tsp_callbacks(struct tsp_callbacks *cb) { tsp_charger_callbacks = cb; } static struct melfas_platform_data melfas_data = { .register_cb = register_tsp_callbacks, }; static struct platform_device touchscreen_device_melfas = { .name = "melfas-ts", .id = -1, .dev = { .platform_data = &melfas_data, }, }; static struct i2c_gpio_platform_data touch_i2c_gpio_data = { .sda_pin = GPIO_TSP_SDA, .scl_pin = GPIO_TSP_SCL, .udelay = 1, }; static struct platform_device touch_i2c_gpio_device = { .name = "i2c-gpio", .id = 2, .dev = { .platform_data = &touch_i2c_gpio_data, }, }; /* I2C 2 */ static struct i2c_board_info touch_i2c_devices[] = { // { // I2C_BOARD_INFO("zinitix_isp", 0x50), // }, { I2C_BOARD_INFO("melfas_ts_i2c", MELFAS_TSP_ADDR), .irq = MSM_GPIO_TO_INT( GPIO_TOUCH_IRQ ), }, }; #endif #ifdef CONFIG_TOUCHSCREEN_ZINITIX_TREBON static void tsp_power_on(void) { int rc = 0; printk("[TSP] %s start \n", __func__); #if (CONFIG_MACH_TREBON_HWREV == 0x0) rc = gpio_request(41, "touch_en"); #else rc = gpio_request(78, "touch_en"); #endif if (rc < 0) { pr_err("failed to request touch_en\n"); } #if (CONFIG_MACH_TREBON_HWREV == 0x0) gpio_tlmm_config(GPIO_CFG(41, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG_ENABLE); gpio_direction_output(41, 1); #else gpio_tlmm_config(GPIO_CFG(78, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG_ENABLE); gpio_direction_output(78, 1); #endif // printk("[TSP] touch_en : %d \n", gpio_get_value(78)); } #endif struct class *sec_class; EXPORT_SYMBOL(sec_class); static void samsung_sys_class_init(void) { pr_info("samsung sys class init.\n"); sec_class = class_create(THIS_MODULE, "sec"); if (IS_ERR(sec_class)) pr_err("Failed to create class(sec) !\n"); } #if 0 // toshiba 
panel static int lcd_panel_spi_gpio_num[] = { GPIO_SPI_MOSI, /* spi_sdi */ GPIO_SPI_MISO, /* spi_sdoi */ GPIO_SPI_CLK, /* spi_clk */ GPIO_SPI_CS0_N, /* spi_cs */ }; static struct msm_panel_common_pdata lcdc_toshiba_panel_data = { .panel_config_gpio = lcdc_toshiba_config_gpios, .pmic_backlight = lcdc_toshiba_set_bl, .gpio_num = lcd_panel_spi_gpio_num, }; static struct platform_device lcdc_toshiba_panel_device = { .name = "lcdc_toshiba_fwvga_pt", .id = 0, .dev = { .platform_data = &lcdc_toshiba_panel_data, } }; #endif static struct resource msm_fb_resources[] = { { .flags = IORESOURCE_DMA, } }; static int msm_fb_detect_panel(const char *name) { int ret = -EPERM; #if defined(CONFIG_FB_MSM_LCDC_S6D16A0X_HVGA) if (!strcmp(name, "lcdc_s6d16a0x_hvga")) ret = 0; else ret = -ENODEV; #elif defined(CONFIG_FB_MSM_LCDC_TREBON_HVGA) if (!strcmp(name, "lcdc_trebon_hvga")) ret = 0; else ret = -ENODEV; #else #if 0 // toshiba panel if (machine_is_msm7x27a_surf()) { if (!strncmp(name, "lcdc_toshiba_fwvga_pt", 21)) ret = 0; } else { ret = -ENODEV; } #endif #endif return ret; } static struct msm_fb_platform_data msm_fb_pdata = { .detect_client = msm_fb_detect_panel, }; static struct platform_device msm_fb_device = { .name = "msm_fb", .id = 0, .num_resources = ARRAY_SIZE(msm_fb_resources), .resource = msm_fb_resources, .dev = { .platform_data = &msm_fb_pdata, } }; #ifdef CONFIG_FB_MSM_MIPI_DSI static int mipi_renesas_set_bl(int level) { int ret; ret = pmapp_disp_backlight_set_brightness(level); if (ret) pr_err("%s: can't set lcd backlight!\n", __func__); return ret; } static struct msm_panel_common_pdata mipi_renesas_pdata = { .pmic_backlight = mipi_renesas_set_bl, }; static struct platform_device mipi_dsi_renesas_panel_device = { .name = "mipi_renesas", .id = 0, .dev = { .platform_data = &mipi_renesas_pdata, } }; #endif static void __init msm7x27a_init_mmc(void) { vreg_emmc = vreg_get(NULL,"msme1"); if (IS_ERR(vreg_emmc)) { pr_err("%s: vreg get failed (%ld)\n", __func__, 
PTR_ERR(vreg_emmc)); return; } vreg_mmc = vreg_get(NULL,"mmc"); if (IS_ERR(vreg_mmc)) { pr_err("%s: vreg get failed (%ld)\n", __func__, PTR_ERR(vreg_mmc)); return; } /* eMMC slot */ #ifdef CONFIG_MMC_MSM_SDC3_SUPPORT sdcc_vreg_data[2].vreg_data = vreg_emmc; sdcc_vreg_data[2].level = 1800; msm_add_sdcc(3, &sdc3_plat_data); #endif /* Micro-SD slot */ #ifdef CONFIG_MMC_MSM_SDC1_SUPPORT sdcc_vreg_data[0].vreg_data = vreg_mmc; sdcc_vreg_data[0].level = 2850; msm_add_sdcc(1, &sdc1_plat_data); #endif /* SDIO WLAN slot */ #ifdef CONFIG_MMC_MSM_SDC2_SUPPORT //sdcc_vreg_data[1].vreg_data = vreg_mmc; sdcc_vreg_data[1].vreg_data = vreg_emmc; sdcc_vreg_data[1].level = 1800/*2850*/; msm_add_sdcc(2, &sdc2_plat_data); #endif /* Not Used */ #if (defined(CONFIG_MMC_MSM_SDC4_SUPPORT)\ && !defined(CONFIG_MMC_MSM_SDC3_8_BIT_SUPPORT)) sdcc_vreg_data[3].vreg_data = vreg_mmc; sdcc_vreg_data[3].level = 2850; msm_add_sdcc(4, &sdc4_plat_data); #endif } #define SND(desc, num) { .name = #desc, .id = num } static struct snd_endpoint snd_endpoints_list[] = { SND(HANDSET, 0), SND(MONO_HEADSET, 2), SND(HEADSET, 3), SND(SPEAKER, 6), SND(TTY_HEADSET, 8), SND(TTY_VCO, 9), SND(TTY_HCO, 10), SND(BT, 12), SND(IN_S_SADC_OUT_HANDSET, 16), SND(VOICE_RECOGNITION, 24), SND(FM_DIGITAL_STEREO_HEADSET, 26), SND(FM_DIGITAL_SPEAKER_PHONE, 27), SND(FM_DIGITAL_BT_A2DP_HEADSET, 28), SND(FM_STEREO_HEADSET, 29), SND(FM_SPEAKER_PHONE, 30), SND(STEREO_HEADSET_AND_SPEAKER, 31), SND(HEADSET_AND_SPEAKER, 32), SND(STEREO_HEADSET_3POLE, 34), SND(MP3_SPEAKER_PHONE, 35), SND(MP3_STEREO_HEADSET, 36), SND(BT_NSEC_OFF, 37), SND(HANDSET_VOIP, 38), SND(STEREO_HEADSET_VOIP, 39), SND(SPEAKER_VOIP, 40), SND(BT_VOIP, 41), SND(HANDSET_VOIP2, 42), SND(STEREO_HEADSET_VOIP2, 43), SND(SPEAKER_VOIP2, 44), SND(BT_VOIP2, 45), SND(VOICE_RECORDER_HPH, 46), SND(VOICE_RECORDER_SPK, 47), SND(FM_ANALOG_STEREO_HEADSET, 50), SND(FM_ANALOG_STEREO_HEADSET_CODEC, 51), SND(CURRENT, 0x7FFFFFFE), }; #undef SND static struct msm_snd_endpoints 
msm_device_snd_endpoints = { .endpoints = snd_endpoints_list, .num = sizeof(snd_endpoints_list) / sizeof(struct snd_endpoint) }; static struct platform_device msm_device_snd = { .name = "msm_snd", .id = -1, .dev = { .platform_data = &msm_device_snd_endpoints }, }; #define DEC0_FORMAT ((1<<MSM_ADSP_CODEC_MP3)| \ (1<<MSM_ADSP_CODEC_AAC)|(1<<MSM_ADSP_CODEC_WMA)| \ (1<<MSM_ADSP_CODEC_WMAPRO)|(1<<MSM_ADSP_CODEC_AMRWB)| \ (1<<MSM_ADSP_CODEC_AMRNB)|(1<<MSM_ADSP_CODEC_WAV)| \ (1<<MSM_ADSP_CODEC_ADPCM)|(1<<MSM_ADSP_CODEC_YADPCM)| \ (1<<MSM_ADSP_CODEC_EVRC)|(1<<MSM_ADSP_CODEC_QCELP)) #define DEC1_FORMAT ((1<<MSM_ADSP_CODEC_MP3)| \ (1<<MSM_ADSP_CODEC_AAC)|(1<<MSM_ADSP_CODEC_WMA)| \ (1<<MSM_ADSP_CODEC_WMAPRO)|(1<<MSM_ADSP_CODEC_AMRWB)| \ (1<<MSM_ADSP_CODEC_AMRNB)|(1<<MSM_ADSP_CODEC_WAV)| \ (1<<MSM_ADSP_CODEC_ADPCM)|(1<<MSM_ADSP_CODEC_YADPCM)| \ (1<<MSM_ADSP_CODEC_EVRC)|(1<<MSM_ADSP_CODEC_QCELP)) #define DEC2_FORMAT ((1<<MSM_ADSP_CODEC_MP3)| \ (1<<MSM_ADSP_CODEC_AAC)|(1<<MSM_ADSP_CODEC_WMA)| \ (1<<MSM_ADSP_CODEC_WMAPRO)|(1<<MSM_ADSP_CODEC_AMRWB)| \ (1<<MSM_ADSP_CODEC_AMRNB)|(1<<MSM_ADSP_CODEC_WAV)| \ (1<<MSM_ADSP_CODEC_ADPCM)|(1<<MSM_ADSP_CODEC_YADPCM)| \ (1<<MSM_ADSP_CODEC_EVRC)|(1<<MSM_ADSP_CODEC_QCELP)) #define DEC3_FORMAT ((1<<MSM_ADSP_CODEC_MP3)| \ (1<<MSM_ADSP_CODEC_AAC)|(1<<MSM_ADSP_CODEC_WMA)| \ (1<<MSM_ADSP_CODEC_WMAPRO)|(1<<MSM_ADSP_CODEC_AMRWB)| \ (1<<MSM_ADSP_CODEC_AMRNB)|(1<<MSM_ADSP_CODEC_WAV)| \ (1<<MSM_ADSP_CODEC_ADPCM)|(1<<MSM_ADSP_CODEC_YADPCM)| \ (1<<MSM_ADSP_CODEC_EVRC)|(1<<MSM_ADSP_CODEC_QCELP)) #define DEC4_FORMAT (1<<MSM_ADSP_CODEC_MIDI) static unsigned int dec_concurrency_table[] = { /* Audio LP */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DMA)), 0, 0, 0, 0, /* Concurrency 1 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), /* 
Concurrency 2 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), /* Concurrency 3 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), /* Concurrency 4 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), /* Concurrency 5 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), /* Concurrency 6 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), 0, 0, 0, 0, /* Concurrency 7 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), }; #define DEC_INFO(name, queueid, decid, nr_codec) { .module_name = name, \ .module_queueid = queueid, .module_decid = decid, \ .nr_codec_support = nr_codec} static struct msm_adspdec_info dec_info_list[] = { DEC_INFO("AUDPLAY0TASK", 13, 0, 11), /* AudPlay0BitStreamCtrlQueue */ DEC_INFO("AUDPLAY1TASK", 14, 1, 11), /* AudPlay1BitStreamCtrlQueue */ DEC_INFO("AUDPLAY2TASK", 15, 2, 11), /* AudPlay2BitStreamCtrlQueue */ DEC_INFO("AUDPLAY3TASK", 16, 3, 11), /* 
AudPlay3BitStreamCtrlQueue */ DEC_INFO("AUDPLAY4TASK", 17, 4, 1), /* AudPlay4BitStreamCtrlQueue */ }; static struct msm_adspdec_database msm_device_adspdec_database = { .num_dec = ARRAY_SIZE(dec_info_list), .num_concurrency_support = (ARRAY_SIZE(dec_concurrency_table) / \ ARRAY_SIZE(dec_info_list)), .dec_concurrency_table = dec_concurrency_table, .dec_info_list = dec_info_list, }; static struct platform_device msm_device_adspdec = { .name = "msm_adspdec", .id = -1, .dev = { .platform_data = &msm_device_adspdec_database }, }; static struct android_pmem_platform_data android_pmem_audio_pdata = { .name = "pmem_audio", .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, .cached = 0, .memory_type = MEMTYPE_EBI1, }; static struct platform_device android_pmem_audio_device = { .name = "android_pmem", .id = 2, .dev = { .platform_data = &android_pmem_audio_pdata }, }; static struct android_pmem_platform_data android_pmem_pdata = { .name = "pmem", .allocator_type = PMEM_ALLOCATORTYPE_BITMAP, .cached = 1, .memory_type = MEMTYPE_EBI1, }; static struct platform_device android_pmem_device = { .name = "android_pmem", .id = 0, .dev = { .platform_data = &android_pmem_pdata }, }; static struct smsc911x_platform_config smsc911x_config = { .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH, .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, .flags = SMSC911X_USE_16BIT, }; static struct resource smsc911x_resources[] = { [0] = { .start = 0x90000000, .end = 0x90007fff, .flags = IORESOURCE_MEM, }, [1] = { .start = MSM_GPIO_TO_INT(48), .end = MSM_GPIO_TO_INT(48), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device smsc911x_device = { .name = "smsc911x", .id = 0, .num_resources = ARRAY_SIZE(smsc911x_resources), .resource = smsc911x_resources, .dev = { .platform_data = &smsc911x_config, }, }; #if 0 /*FSA Driver Porting: GPIO 49 and 50 used by MUS_SDA and MUS_SCL */ static struct msm_gpio smsc911x_gpios[] = { { GPIO_CFG(48, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_6MA), 
"smsc911x_irq" }, { GPIO_CFG(49, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_6MA), "eth_fifo_sel" }, }; #define ETH_FIFO_SEL_GPIO 49 static void msm7x27a_cfg_smsc911x(void) { int res; res = msm_gpios_request_enable(smsc911x_gpios, ARRAY_SIZE(smsc911x_gpios)); if (res) { pr_err("%s: unable to enable gpios for SMSC911x\n", __func__); return; } /* ETH_FIFO_SEL */ res = gpio_direction_output(ETH_FIFO_SEL_GPIO, 0); if (res) { pr_err("%s: unable to get direction for gpio %d\n", __func__, ETH_FIFO_SEL_GPIO); msm_gpios_disable_free(smsc911x_gpios, ARRAY_SIZE(smsc911x_gpios)); return; } gpio_set_value(ETH_FIFO_SEL_GPIO, 0); } #endif #if defined(CONFIG_SERIAL_MSM_HSL_CONSOLE) \ && defined(CONFIG_MSM_SHARED_GPIO_FOR_UART2DM) static struct msm_gpio uart2dm_gpios[] = { {GPIO_CFG(19, 2, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "uart2dm_rfr_n" }, {GPIO_CFG(20, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "uart2dm_cts_n" }, {GPIO_CFG(21, 2, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "uart2dm_rx" }, {GPIO_CFG(108, 2, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), "uart2dm_tx" }, }; static void msm7x27a_cfg_uart2dm_serial(void) { int ret; ret = msm_gpios_request_enable(uart2dm_gpios, ARRAY_SIZE(uart2dm_gpios)); if (ret) pr_err("%s: unable to enable gpios for uart2dm\n", __func__); } #else static void msm7x27a_cfg_uart2dm_serial(void) { } #endif static struct platform_device *rumi_sim_devices[] __initdata = { &msm_device_dmov, &msm_device_smd, &smc91x_device, &msm_device_uart1, &msm_device_nand, &msm_device_uart_dm1, &msm_gsbi0_qup_i2c_device, &msm_gsbi1_qup_i2c_device }; static struct platform_device *surf_ffa_devices[] __initdata = { &msm_device_dmov, &msm_device_smd, &msm_device_uart1, &msm_device_uart_dm1, //&msm_device_uart_dm2, &msm_device_nand, &msm_gsbi0_qup_i2c_device, &msm_gsbi1_qup_i2c_device, &msm_device_otg, &msm_device_gadget_peripheral, &android_usb_device, &android_pmem_device, &android_pmem_adsp_device, &android_pmem_audio_device, 
&msm_device_snd, &msm_device_adspdec, &msm_fb_device, //&lcdc_toshiba_panel_device, &lcdc_trebon_panel_device, &msm_batt_device, // &smsc911x_device, #ifdef CONFIG_FB_MSM_MIPI_DSI &mipi_dsi_renesas_panel_device, #endif &msm_kgsl_3d0, #ifdef CONFIG_BT &msm_bt_power_device, #endif #ifdef CONFIG_TOUCHSCREEN_MELFAS_TS &touch_i2c_gpio_device, &touchscreen_device_melfas, #endif &fsa880_i2c_gpio_device, #ifdef CONFIG_BQ27425_FUEL_GAUGE &fuelgauge_i2c_gpio_device, #endif #ifdef CONFIG_MAX17043_FUELGAUGE &fg_smb_i2c_gpio_device, #endif #ifdef CONFIG_PROXIMITY_SENSOR &sensor_i2c_gpio_device, #endif #ifdef CONFIG_SAMSUNG_JACK &sec_device_jack, #endif &msm_device_pmic_leds, &msm_vibrator_device, &asoc_msm_pcm, &asoc_msm_dai0, &asoc_msm_dai1, }; static unsigned pmem_kernel_ebi1_size = PMEM_KERNEL_EBI1_SIZE; static int __init pmem_kernel_ebi1_size_setup(char *p) { pmem_kernel_ebi1_size = memparse(p, NULL); return 0; } early_param("pmem_kernel_ebi1_size", pmem_kernel_ebi1_size_setup); static unsigned pmem_audio_size = MSM_PMEM_AUDIO_SIZE; static int __init pmem_audio_size_setup(char *p) { pmem_audio_size = memparse(p, NULL); return 0; } early_param("pmem_audio_size", pmem_audio_size_setup); static void __init msm_msm7x2x_allocate_memory_regions(void) { void *addr; unsigned long size; size = fb_size ? 
: MSM_FB_SIZE; addr = alloc_bootmem_align(size, 0x1000); msm_fb_resources[0].start = __pa(addr); msm_fb_resources[0].end = msm_fb_resources[0].start + size - 1; pr_info("allocating %lu bytes at %p (%lx physical) for fb\n", size, addr, __pa(addr)); } static struct memtype_reserve msm7x27a_reserve_table[] __initdata = { [MEMTYPE_SMI] = { }, [MEMTYPE_EBI0] = { .flags = MEMTYPE_FLAGS_1M_ALIGN, }, [MEMTYPE_EBI1] = { .flags = MEMTYPE_FLAGS_1M_ALIGN, }, }; static void __init size_pmem_devices(void) { #ifdef CONFIG_ANDROID_PMEM android_pmem_adsp_pdata.size = pmem_adsp_size; android_pmem_pdata.size = pmem_mdp_size; android_pmem_audio_pdata.size = pmem_audio_size; #endif } static void __init reserve_memory_for(struct android_pmem_platform_data *p) { msm7x27a_reserve_table[p->memory_type].size += p->size; } static void __init reserve_pmem_memory(void) { #ifdef CONFIG_ANDROID_PMEM reserve_memory_for(&android_pmem_adsp_pdata); reserve_memory_for(&android_pmem_pdata); reserve_memory_for(&android_pmem_audio_pdata); msm7x27a_reserve_table[MEMTYPE_EBI1].size += pmem_kernel_ebi1_size; #endif } static void __init msm7x27a_calculate_reserve_sizes(void) { size_pmem_devices(); reserve_pmem_memory(); } static int msm7x27a_paddr_to_memtype(unsigned int paddr) { return MEMTYPE_EBI1; } static struct reserve_info msm7x27a_reserve_info __initdata = { .memtype_reserve_table = msm7x27a_reserve_table, .calculate_reserve_sizes = msm7x27a_calculate_reserve_sizes, .paddr_to_memtype = msm7x27a_paddr_to_memtype, }; static void __init msm7x27a_reserve(void) { reserve_info = &msm7x27a_reserve_info; msm_reserve(); } static void __init msm_device_i2c_init(void) { msm_gsbi0_qup_i2c_device.dev.platform_data = &msm_gsbi0_qup_i2c_pdata; msm_gsbi1_qup_i2c_device.dev.platform_data = &msm_gsbi1_qup_i2c_pdata; gpio_tlmm_config(GPIO_CFG(GPIO_TSP_SDA, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA),GPIO_CFG_ENABLE); gpio_tlmm_config(GPIO_CFG(GPIO_TSP_SCL, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA), 
GPIO_CFG_ENABLE); gpio_tlmm_config(GPIO_CFG(GPIO_MUS_SDA, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA), GPIO_CFG_ENABLE); gpio_tlmm_config(GPIO_CFG(GPIO_MUS_SCL, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA), GPIO_CFG_ENABLE); gpio_tlmm_config(GPIO_CFG(GPIO_MUSB_INT, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA),GPIO_CFG_ENABLE); /* gpio_tlmm_config(GPIO_CFG(27, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA),GPIO_CFG_ENABLE); */ #if defined(CONFIG_PROXIMITY_SENSOR) gpio_tlmm_config(GPIO_CFG(GPIO_SENSOR_SCL, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA),GPIO_CFG_ENABLE); gpio_tlmm_config(GPIO_CFG(GPIO_SENSOR_SDA, 0, GPIO_CFG_OUTPUT, GPIO_CFG_PULL_UP, GPIO_CFG_2MA),GPIO_CFG_ENABLE); #endif } static struct msm_panel_common_pdata mdp_pdata = { .gpio = 97, .mdp_rev = MDP_REV_303, }; #define GPIO_LCDC_BRDG_PD 128 #define GPIO_LCDC_BRDG_RESET_N 129 #define LCDC_RESET_PHYS 0x90008014 static void __iomem *lcdc_reset_ptr; static unsigned mipi_dsi_gpio[] = { GPIO_CFG(GPIO_LCDC_BRDG_RESET_N, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), /* LCDC_BRDG_RESET_N */ GPIO_CFG(GPIO_LCDC_BRDG_PD, 0, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), /* LCDC_BRDG_RESET_N */ }; enum { DSI_SINGLE_LANE = 1, DSI_TWO_LANES, }; static int msm_fb_get_lane_config(void) { int rc = DSI_TWO_LANES; if (machine_is_msm7625a_surf() || machine_is_msm7625a_ffa()) { rc = DSI_SINGLE_LANE; pr_info("DSI Single Lane\n"); } else { pr_info("DSI Two Lanes\n"); } return rc; } static int msm_fb_dsi_client_reset(void) { int rc = 0; rc = gpio_request(GPIO_LCDC_BRDG_RESET_N, "lcdc_brdg_reset_n"); if (rc < 0) { pr_err("failed to request lcd brdg reset_n\n"); return rc; } rc = gpio_request(GPIO_LCDC_BRDG_PD, "lcdc_brdg_pd"); if (rc < 0) { pr_err("failed to request lcd brdg pd\n"); return rc; } rc = gpio_tlmm_config(mipi_dsi_gpio[0], GPIO_CFG_ENABLE); if (rc) { pr_err("Failed to enable LCDC Bridge reset enable\n"); goto gpio_error; } rc = gpio_tlmm_config(mipi_dsi_gpio[1], 
GPIO_CFG_ENABLE); if (rc) { pr_err("Failed to enable LCDC Bridge pd enable\n"); goto gpio_error2; } rc = gpio_direction_output(GPIO_LCDC_BRDG_RESET_N, 1); rc |= gpio_direction_output(GPIO_LCDC_BRDG_PD, 1); gpio_set_value_cansleep(GPIO_LCDC_BRDG_PD, 0); if (!rc) { if (machine_is_msm7x27a_surf()) { lcdc_reset_ptr = ioremap_nocache(LCDC_RESET_PHYS, sizeof(uint32_t)); if (!lcdc_reset_ptr) return 0; } return rc; } else { goto gpio_error; } gpio_error2: pr_err("Failed GPIO bridge pd\n"); gpio_free(GPIO_LCDC_BRDG_PD); gpio_error: pr_err("Failed GPIO bridge reset\n"); gpio_free(GPIO_LCDC_BRDG_RESET_N); return rc; } static const char * const msm_fb_dsi_vreg[] = { "gp2", "msme1", "mddi" }; static const int msm_fb_dsi_vreg_mV[] = { 2850, 1800, 1200 }; static struct vreg *dsi_vreg[ARRAY_SIZE(msm_fb_dsi_vreg)]; static int dsi_gpio_initialized; static int mipi_dsi_panel_power(int on) { int i, rc = 0; uint32_t lcdc_reset_cfg; /* I2C-controlled GPIO Expander -init of the GPIOs very late */ if (!dsi_gpio_initialized) { pmapp_disp_backlight_init(); rc = gpio_request(GPIO_DISPLAY_PWR_EN, "gpio_disp_pwr"); if (rc < 0) { pr_err("failed to request gpio_disp_pwr\n"); return rc; } rc = gpio_direction_output(GPIO_DISPLAY_PWR_EN, 1); if (rc < 0) { pr_err("failed to enable display pwr\n"); goto fail_gpio1; } if (machine_is_msm7x27a_surf()) { rc = gpio_request(GPIO_BACKLIGHT_EN, "gpio_bkl_en"); if (rc < 0) { pr_err("failed to request gpio_bkl_en\n"); goto fail_gpio1; } rc = gpio_direction_output(GPIO_BACKLIGHT_EN, 1); if (rc < 0) { pr_err("failed to enable backlight\n"); goto fail_gpio2; } } for (i = 0; i < ARRAY_SIZE(msm_fb_dsi_vreg); i++) { dsi_vreg[i] = vreg_get(0, msm_fb_dsi_vreg[i]); if (IS_ERR(dsi_vreg[i])) { pr_err("%s: vreg get failed with : (%ld)\n", __func__, PTR_ERR(dsi_vreg[i])); goto fail_gpio2; } rc = vreg_set_level(dsi_vreg[i], msm_fb_dsi_vreg_mV[i]); if (rc < 0) { pr_err("%s: set regulator level failed " "with :(%d)\n", __func__, rc); goto vreg_fail1; } } dsi_gpio_initialized 
= 1; } gpio_set_value_cansleep(GPIO_DISPLAY_PWR_EN, on); if (machine_is_msm7x27a_surf()) { gpio_set_value_cansleep(GPIO_BACKLIGHT_EN, on); } if (on) { gpio_set_value_cansleep(GPIO_LCDC_BRDG_PD, 0); if (machine_is_msm7x27a_surf()) { lcdc_reset_cfg = readl_relaxed(lcdc_reset_ptr); rmb(); lcdc_reset_cfg &= ~1; writel_relaxed(lcdc_reset_cfg, lcdc_reset_ptr); msleep(20); wmb(); lcdc_reset_cfg |= 1; writel_relaxed(lcdc_reset_cfg, lcdc_reset_ptr); } else { gpio_set_value_cansleep(GPIO_LCDC_BRDG_RESET_N, 0); msleep(20); gpio_set_value_cansleep(GPIO_LCDC_BRDG_RESET_N, 1); } if (pmapp_disp_backlight_set_brightness(100)) pr_err("backlight set brightness failed\n"); } else { gpio_set_value_cansleep(GPIO_LCDC_BRDG_PD, 1); if (pmapp_disp_backlight_set_brightness(0)) pr_err("backlight set brightness failed\n"); } /*Configure vreg lines */ for (i = 0; i < ARRAY_SIZE(msm_fb_dsi_vreg); i++) { if (on) { rc = vreg_enable(dsi_vreg[i]); if (rc) { printk(KERN_ERR "vreg_enable: %s vreg" "operation failed\n", msm_fb_dsi_vreg[i]); goto vreg_fail2; } } else { rc = vreg_disable(dsi_vreg[i]); if (rc) { printk(KERN_ERR "vreg_disable: %s vreg " "operation failed\n", msm_fb_dsi_vreg[i]); goto vreg_fail2; } } } return rc; vreg_fail2: if (on) { for (; i > 0; i--) vreg_disable(dsi_vreg[i - 1]); } else { for (; i > 0; i--) vreg_enable(dsi_vreg[i - 1]); } return rc; vreg_fail1: for (; i > 0; i--) vreg_put(dsi_vreg[i - 1]); fail_gpio2: gpio_free(GPIO_BACKLIGHT_EN); fail_gpio1: gpio_free(GPIO_DISPLAY_PWR_EN); dsi_gpio_initialized = 0; return rc; } #define MDP_303_VSYNC_GPIO 97 #ifdef CONFIG_FB_MSM_MDP303 static struct mipi_dsi_platform_data mipi_dsi_pdata = { .vsync_gpio = MDP_303_VSYNC_GPIO, .dsi_power_save = mipi_dsi_panel_power, .dsi_client_reset = msm_fb_dsi_client_reset, .get_lane_config = msm_fb_get_lane_config, }; #endif static void __init msm_fb_add_devices(void) { msm_fb_register_device("mdp", &mdp_pdata); msm_fb_register_device("lcdc", &lcdc_pdata); #ifdef CONFIG_FB_MSM_MDP303 
msm_fb_register_device("mipi_dsi", &mipi_dsi_pdata); #endif } #define MSM_EBI2_PHYS 0xa0d00000 #define MSM_EBI2_XMEM_CS2_CFG1 0xa0d10030 static void __init msm7x27a_init_ebi2(void) { uint32_t ebi2_cfg; void __iomem *ebi2_cfg_ptr; ebi2_cfg_ptr = ioremap_nocache(MSM_EBI2_PHYS, sizeof(uint32_t)); if (!ebi2_cfg_ptr) return; ebi2_cfg = readl(ebi2_cfg_ptr); if (machine_is_msm7x27a_rumi3() || machine_is_msm7x27a_surf()) ebi2_cfg |= (1 << 4); /* CS2 */ writel(ebi2_cfg, ebi2_cfg_ptr); iounmap(ebi2_cfg_ptr); /* Enable A/D MUX[bit 31] from EBI2_XMEM_CS2_CFG1 */ ebi2_cfg_ptr = ioremap_nocache(MSM_EBI2_XMEM_CS2_CFG1, sizeof(uint32_t)); if (!ebi2_cfg_ptr) return; ebi2_cfg = readl(ebi2_cfg_ptr); if (machine_is_msm7x27a_surf()) ebi2_cfg |= (1 << 31); writel(ebi2_cfg, ebi2_cfg_ptr); iounmap(ebi2_cfg_ptr); } #define ATMEL_TS_I2C_NAME "maXTouch" static struct regulator_bulk_data regs_atmel[] = { { .supply = "ldo2", .min_uV = 2850000, .max_uV = 2850000 }, { .supply = "smps3", .min_uV = 1800000, .max_uV = 1800000 }, }; #define ATMEL_TS_GPIO_IRQ 82 static int atmel_ts_power_on(bool on) { int rc = on ? regulator_bulk_enable(ARRAY_SIZE(regs_atmel), regs_atmel) : regulator_bulk_disable(ARRAY_SIZE(regs_atmel), regs_atmel); if (rc) pr_err("%s: could not %sable regulators: %d\n", __func__, on ? 
"en" : "dis", rc); else msleep(50); return rc; } static int atmel_ts_platform_init(struct i2c_client *client) { int rc; struct device *dev = &client->dev; rc = regulator_bulk_get(dev, ARRAY_SIZE(regs_atmel), regs_atmel); if (rc) { dev_err(dev, "%s: could not get regulators: %d\n", __func__, rc); goto out; } rc = regulator_bulk_set_voltage(ARRAY_SIZE(regs_atmel), regs_atmel); if (rc) { dev_err(dev, "%s: could not set voltages: %d\n", __func__, rc); goto reg_free; } rc = gpio_tlmm_config(GPIO_CFG(ATMEL_TS_GPIO_IRQ, 0, GPIO_CFG_INPUT, GPIO_CFG_PULL_UP, GPIO_CFG_8MA), GPIO_CFG_ENABLE); if (rc) { dev_err(dev, "%s: gpio_tlmm_config for %d failed\n", __func__, ATMEL_TS_GPIO_IRQ); goto reg_free; } /* configure touchscreen interrupt gpio */ rc = gpio_request(ATMEL_TS_GPIO_IRQ, "atmel_maxtouch_gpio"); if (rc) { dev_err(dev, "%s: unable to request gpio %d\n", __func__, ATMEL_TS_GPIO_IRQ); goto ts_gpio_tlmm_unconfig; } rc = gpio_direction_input(ATMEL_TS_GPIO_IRQ); if (rc < 0) { dev_err(dev, "%s: unable to set the direction of gpio %d\n", __func__, ATMEL_TS_GPIO_IRQ); goto free_ts_gpio; } return 0; free_ts_gpio: gpio_free(ATMEL_TS_GPIO_IRQ); ts_gpio_tlmm_unconfig: gpio_tlmm_config(GPIO_CFG(ATMEL_TS_GPIO_IRQ, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG_DISABLE); reg_free: regulator_bulk_free(ARRAY_SIZE(regs_atmel), regs_atmel); out: return rc; } static int atmel_ts_platform_exit(struct i2c_client *client) { gpio_free(ATMEL_TS_GPIO_IRQ); gpio_tlmm_config(GPIO_CFG(ATMEL_TS_GPIO_IRQ, 0, GPIO_CFG_INPUT, GPIO_CFG_NO_PULL, GPIO_CFG_2MA), GPIO_CFG_DISABLE); regulator_bulk_free(ARRAY_SIZE(regs_atmel), regs_atmel); return 0; } static u8 atmel_ts_read_chg(void) { return gpio_get_value(ATMEL_TS_GPIO_IRQ); } static u8 atmel_ts_valid_interrupt(void) { return !atmel_ts_read_chg(); } #define ATMEL_X_OFFSET 13 #define ATMEL_Y_OFFSET 0 static struct maxtouch_platform_data atmel_ts_pdata = { .numtouch = 4, .init_platform_hw = atmel_ts_platform_init, .exit_platform_hw = 
atmel_ts_platform_exit, .power_on = atmel_ts_power_on, .display_res_x = 480, .display_res_y = 864, .min_x = ATMEL_X_OFFSET, .max_x = (505 - ATMEL_X_OFFSET), .min_y = ATMEL_Y_OFFSET, .max_y = (863 - ATMEL_Y_OFFSET), .valid_interrupt = atmel_ts_valid_interrupt, .read_chg = atmel_ts_read_chg, }; static struct i2c_board_info atmel_ts_i2c_info[] __initdata = { { I2C_BOARD_INFO(ATMEL_TS_I2C_NAME, 0x4a), .platform_data = &atmel_ts_pdata, .irq = MSM_GPIO_TO_INT(ATMEL_TS_GPIO_IRQ), }, }; #define KP_INDEX(row, col) ((row)*ARRAY_SIZE(kp_col_gpios) + (col)) static unsigned int kp_row_gpios[] = {39}; static unsigned int kp_col_gpios[] = {36, 31}; static const unsigned short keymap[ARRAY_SIZE(kp_col_gpios) * ARRAY_SIZE(kp_row_gpios)] = { [KP_INDEX(0, 0)] = KEY_VOLUMEDOWN, [KP_INDEX(0, 1)] = KEY_VOLUMEUP, }; /* SURF keypad platform device information */ static struct gpio_event_matrix_info kp_matrix_info = { .info.func = gpio_event_matrix_func, .keymap = keymap, .output_gpios = kp_col_gpios, .input_gpios = kp_row_gpios, .noutputs = ARRAY_SIZE(kp_col_gpios), .ninputs = ARRAY_SIZE(kp_row_gpios), .settle_time.tv_nsec = 40 * NSEC_PER_USEC, .poll_time.tv_nsec = 20 * NSEC_PER_MSEC, .flags = GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_DRIVE_INACTIVE | GPIOKPF_PRINT_UNMAPPED_KEYS, }; static struct gpio_event_info *kp_info[] = { &kp_matrix_info.info }; static struct gpio_event_platform_data kp_pdata = { .name = "7x27a_kp", .info = kp_info, .info_count = ARRAY_SIZE(kp_info) }; static struct platform_device kp_pdev = { .name = GPIO_EVENT_DEV_NAME, .id = -1, .dev = { .platform_data = &kp_pdata, }, }; static struct msm_handset_platform_data hs_platform_data = { .hs_name = "sec_jack", .pwr_key_delay_ms = 500, /* 0 will disable end key */ }; static struct platform_device hs_pdev = { .name = "msm-handset", .id = -1, .dev = { .platform_data = &hs_platform_data, }, }; static struct platform_device msm_proccomm_regulator_dev = { .name = PROCCOMM_REGULATOR_DEV_NAME, .id = -1, .dev = { .platform_data = 
&msm7x27a_proccomm_regulator_data } }; static void __init msm7627a_rumi3_init(void) { msm7x27a_init_ebi2(); platform_add_devices(rumi_sim_devices, ARRAY_SIZE(rumi_sim_devices)); } #define LED_GPIO_PDM 96 #define UART1DM_RX_GPIO 45 static int __init msm7x27a_init_ar6000pm(void) { return platform_device_register(&msm_wlan_ar6000_pm_device); } static void __init msm7x27a_init_regulators(void) { int rc = platform_device_register(&msm_proccomm_regulator_dev); if (rc) pr_err("%s: could not register regulator device: %d\n", __func__, rc); } static struct msm7x27a_regulators { const char *id; unsigned voltage; // in mv }; static struct msm7x27a_regulators msm7x27a_reg[] = { [0] = { .id = "smps3", .voltage = 1800, }, #if 0 [1] = { .id = "ldo9", .voltage = 1800, }, [2] = { .id = "ldo13", .voltage = 2850, }, [3] = { .id = "smps1", .voltage = 1100, }, [4] = { .id = "smps2", .voltage = 1100, }, [5] = { .id = "smps4", .voltage = 2100, }, [4] = { .id = "ldo1", .voltage = 2100, }, [5] = { .id = "ldo2", .voltage = 2100, }, [6] = { .id = "ldo3", .voltage = 1200, }, [7] = { .id = "ldo4", .voltage = 1100, }, [8] = { .id = "ldo5", .voltage = 0, }, [9] = { .id = "ldo6", .voltage = 1200, }, [10] = { .id = "ldo7", .voltage = 2600, }, [11] = { .id = "ldo8", .voltage = 2800, }, [13] = { .id = "ldo10", .voltage = 2850, }, [14] = { .id = "ldo11", .voltage = 1800, }, [15] = { .id = "ldo12", .voltage = 3300, }, [17] = { .id = "ldo14", .voltage = 3000, }, [18] = { .id = "ldo15", .voltage = 1800, }, [19] = { .id = "ldo16", .voltage = 3000, }, [20] = { .id = "ldo17", .voltage = 2800, }, [21] = { .id = "ldo18", .voltage = 2700, }, [22] = { .id = "ldo19", .voltage = 3000, }, #endif }; /* function to Enable the regulators */ static void msm7x27a_enable_regulators(void) { struct vreg *vreg = NULL; struct msm7x27a_regulators *regulators = msm7x27a_reg; static int i = 0, ret =0, VRG_SIZE =0; /* Get the count of regulators */ VRG_SIZE= ARRAY_SIZE(msm7x27a_reg); for(i=0; i<VRG_SIZE; i++) { /* get the 
regulator descriptor */ vreg = vreg_get( NULL, regulators[i].id); if(IS_ERR(vreg)) { pr_err("failed to get the regulator %s\n",regulators[i].id); return ; } /* set the regulator voltage(optimum) */ ret = vreg_set_level(vreg,regulators[i].voltage); if(ret) { pr_err("failed to set the voltage level for regulator %s\n",regulators[i].id); return ; } /* enable the regulator or ldo */ if(vreg_enable(vreg)) { pr_err("failed to enable the regulator %s\n",regulators[i].id); return ; } } pr_debug("Successfully enabled all regulators\n"); return; } static void __init msm7x2x_init(void) { msm7x2x_misc_init(); /* Initialize the regulators */ msm7x27a_init_regulators(); /* Enable the Required regulators */ msm7x27a_enable_regulators(); /* Common functions for SURF/FFA/RUMI3 */ msm_device_i2c_init(); msm7x27a_init_ebi2(); // msm7x27a_cfg_uart2dm_serial(); #ifdef CONFIG_SERIAL_MSM_HS msm_uart_dm1_pdata.wakeup_irq = gpio_to_irq(UART1DM_RX_GPIO); msm_device_uart_dm1.dev.platform_data = &msm_uart_dm1_pdata; #endif #ifdef CONFIG_USB_MSM_OTG_72K msm_otg_pdata.swfi_latency = msm7x27a_pm_data [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].latency; msm_device_otg.dev.platform_data = &msm_otg_pdata; #endif msm_device_gadget_peripheral.dev.platform_data = &msm_gadget_pdata; /* msm7x27a_cfg_smsc911x(); */ /* FSA driver porting */ #ifdef CONFIG_SAMSUNG_JACK sec_jack_gpio_init(); #endif platform_add_devices(msm_footswitch_devices, msm_num_footswitch_devices); platform_add_devices(surf_ffa_devices, ARRAY_SIZE(surf_ffa_devices)); if (!kernel_uart_flag) { platform_device_register(&msm_device_uart3); } /* Ensure ar6000pm device is registered before MMC/SDC */ msm7x27a_init_ar6000pm(); msm7x27a_init_mmc(); lcdc_trebon_gpio_init(); msm_fb_add_devices(); #ifdef CONFIG_USB_EHCI_MSM_72K msm7x2x_init_host(); #endif msm_pm_set_platform_data(msm7x27a_pm_data, ARRAY_SIZE(msm7x27a_pm_data)); BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata)); register_i2c_devices(); #if defined(CONFIG_BT) && 
defined(CONFIG_MARIMBA_CORE) msm7627a_bt_power_init(); #endif #ifdef CONFIG_TOUCHSCREEN_ZINITIX_TREBON tsp_power_on(); #endif samsung_sys_class_init(); i2c_register_board_info( 2, touch_i2c_devices, ARRAY_SIZE(touch_i2c_devices)); i2c_register_board_info( 3, fsa880_i2c_devices, ARRAY_SIZE(fsa880_i2c_devices)); #ifdef CONFIG_BQ27425_FUEL_GAUGE i2c_register_board_info(6, fg_i2c_devices, ARRAY_SIZE(fg_i2c_devices)); #endif #ifdef CONFIG_MAX17043_FUELGAUGE i2c_register_board_info(6, fg_smb_i2c_devices, ARRAY_SIZE(fg_smb_i2c_devices)); #endif i2c_register_board_info(MSM_GSBI1_QUP_I2C_BUS_ID, atmel_ts_i2c_info, ARRAY_SIZE(atmel_ts_i2c_info)); #if defined(CONFIG_MSM_CAMERA) msm7627a_camera_init(); #endif platform_device_register(&kp_pdev); platform_device_register(&hs_pdev); /* configure it as a pdm function*/ if (gpio_tlmm_config(GPIO_CFG(LED_GPIO_PDM, 3, GPIO_CFG_OUTPUT, GPIO_CFG_NO_PULL, GPIO_CFG_8MA), GPIO_CFG_ENABLE)) pr_err("%s: gpio_tlmm_config for %d failed\n", __func__, LED_GPIO_PDM); else platform_device_register(&led_pdev); #ifdef CONFIG_PROXIMITY_SENSOR gp2a_init(); /* LED_onoff(1); func call moved to probe in gp2a.c*/ #endif #ifdef CONFIG_MSM_RPC_VIBRATOR if (machine_is_msm7x27a_ffa() || machine_is_msm7625a_ffa()) //msm_init_pmic_vibrator(); #endif /*7x25a kgsl initializations*/ msm7x25a_kgsl_3d0_init(); } static void __init msm7x2x_init_early(void) { msm_msm7x2x_allocate_memory_regions(); } MACHINE_START(MSM7X27A_RUMI3, "QCT MSM7x27a RUMI3") .boot_params = PHYS_OFFSET + 0x100, .map_io = msm_common_io_init, .reserve = msm7x27a_reserve, .init_irq = msm_init_irq, .init_machine = msm7627a_rumi3_init, .timer = &msm_timer, .init_early = msm7x2x_init_early, .handle_irq = vic_handle_irq, MACHINE_END MACHINE_START(MSM7X27A_SURF, "QCT MSM7x27a SURF") .boot_params = PHYS_OFFSET + 0x100, .map_io = msm_common_io_init, .reserve = msm7x27a_reserve, .init_irq = msm_init_irq, .init_machine = msm7x2x_init, .timer = &msm_timer, .init_early = msm7x2x_init_early, .handle_irq = 
vic_handle_irq, MACHINE_END MACHINE_START(MSM7X27A_FFA, "QCT MSM7x27a FFA") .boot_params = PHYS_OFFSET + 0x100, .map_io = msm_common_io_init, .reserve = msm7x27a_reserve, .init_irq = msm_init_irq, .init_machine = msm7x2x_init, .timer = &msm_timer, .init_early = msm7x2x_init_early, .handle_irq = vic_handle_irq, MACHINE_END MACHINE_START(MSM7625A_SURF, "QCT MSM7625a SURF") .boot_params = PHYS_OFFSET + 0x100, .map_io = msm_common_io_init, .reserve = msm7x27a_reserve, .init_irq = msm_init_irq, .init_machine = msm7x2x_init, .timer = &msm_timer, .init_early = msm7x2x_init_early, .handle_irq = vic_handle_irq, MACHINE_END MACHINE_START(MSM7625A_FFA, "QCT MSM7625a FFA") .boot_params = PHYS_OFFSET + 0x100, .map_io = msm_common_io_init, .reserve = msm7x27a_reserve, .init_irq = msm_init_irq, .init_machine = msm7x2x_init, .timer = &msm_timer, .init_early = msm7x2x_init_early, .handle_irq = vic_handle_irq, MACHINE_END
Dm47021/android_kernel_samsung_centura_sch738c
arch/arm/mach-msm/board-amazing_cdma.c
C
gpl-2.0
89,633
/* * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_bit.h" #include "xfs_mount.h" #include "xfs_defer.h" #include "xfs_inode.h" #include "xfs_trans.h" #include "xfs_inode_item.h" #include "xfs_buf_item.h" #include "xfs_btree.h" #include "xfs_error.h" #include "xfs_trace.h" #include "xfs_cksum.h" #include "xfs_alloc.h" #include "xfs_log.h" /* * Cursor allocation zone. */ kmem_zone_t *xfs_btree_cur_zone; /* * Btree magic numbers. 
*/ static const __uint32_t xfs_magics[2][XFS_BTNUM_MAX] = { { XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, 0, XFS_BMAP_MAGIC, XFS_IBT_MAGIC, XFS_FIBT_MAGIC }, { XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC, XFS_RMAP_CRC_MAGIC, XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC, XFS_FIBT_CRC_MAGIC } }; #define xfs_btree_magic(cur) \ xfs_magics[!!((cur)->bc_flags & XFS_BTREE_CRC_BLOCKS)][cur->bc_btnum] STATIC int /* error (0 or EFSCORRUPTED) */ xfs_btree_check_lblock( struct xfs_btree_cur *cur, /* btree cursor */ struct xfs_btree_block *block, /* btree long form block pointer */ int level, /* level of the btree block */ struct xfs_buf *bp) /* buffer for block, if any */ { int lblock_ok = 1; /* block passes checks */ struct xfs_mount *mp; /* file system mount point */ mp = cur->bc_mp; if (xfs_sb_version_hascrc(&mp->m_sb)) { lblock_ok = lblock_ok && uuid_equal(&block->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid) && block->bb_u.l.bb_blkno == cpu_to_be64( bp ? bp->b_bn : XFS_BUF_DADDR_NULL); } lblock_ok = lblock_ok && be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) && be16_to_cpu(block->bb_level) == level && be16_to_cpu(block->bb_numrecs) <= cur->bc_ops->get_maxrecs(cur, level) && block->bb_u.l.bb_leftsib && (block->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK) || XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_leftsib))) && block->bb_u.l.bb_rightsib && (block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK) || XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_rightsib))); if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp, XFS_ERRTAG_BTREE_CHECK_LBLOCK, XFS_RANDOM_BTREE_CHECK_LBLOCK))) { if (bp) trace_xfs_btree_corrupt(bp, _RET_IP_); XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); return -EFSCORRUPTED; } return 0; } STATIC int /* error (0 or EFSCORRUPTED) */ xfs_btree_check_sblock( struct xfs_btree_cur *cur, /* btree cursor */ struct xfs_btree_block *block, /* btree short form block pointer */ int level, /* level of the btree block */ struct xfs_buf *bp) /* buffer containing block */ { struct 
xfs_mount *mp; /* file system mount point */ struct xfs_buf *agbp; /* buffer for ag. freespace struct */ struct xfs_agf *agf; /* ag. freespace structure */ xfs_agblock_t agflen; /* native ag. freespace length */ int sblock_ok = 1; /* block passes checks */ mp = cur->bc_mp; agbp = cur->bc_private.a.agbp; agf = XFS_BUF_TO_AGF(agbp); agflen = be32_to_cpu(agf->agf_length); if (xfs_sb_version_hascrc(&mp->m_sb)) { sblock_ok = sblock_ok && uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid) && block->bb_u.s.bb_blkno == cpu_to_be64( bp ? bp->b_bn : XFS_BUF_DADDR_NULL); } sblock_ok = sblock_ok && be32_to_cpu(block->bb_magic) == xfs_btree_magic(cur) && be16_to_cpu(block->bb_level) == level && be16_to_cpu(block->bb_numrecs) <= cur->bc_ops->get_maxrecs(cur, level) && (block->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) || be32_to_cpu(block->bb_u.s.bb_leftsib) < agflen) && block->bb_u.s.bb_leftsib && (block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK) || be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) && block->bb_u.s.bb_rightsib; if (unlikely(XFS_TEST_ERROR(!sblock_ok, mp, XFS_ERRTAG_BTREE_CHECK_SBLOCK, XFS_RANDOM_BTREE_CHECK_SBLOCK))) { if (bp) trace_xfs_btree_corrupt(bp, _RET_IP_); XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); return -EFSCORRUPTED; } return 0; } /* * Debug routine: check that block header is ok. */ int xfs_btree_check_block( struct xfs_btree_cur *cur, /* btree cursor */ struct xfs_btree_block *block, /* generic btree block pointer */ int level, /* level of the btree block */ struct xfs_buf *bp) /* buffer containing block, if any */ { if (cur->bc_flags & XFS_BTREE_LONG_PTRS) return xfs_btree_check_lblock(cur, block, level, bp); else return xfs_btree_check_sblock(cur, block, level, bp); } /* * Check that (long) pointer is ok. 
*/ int /* error (0 or EFSCORRUPTED) */ xfs_btree_check_lptr( struct xfs_btree_cur *cur, /* btree cursor */ xfs_fsblock_t bno, /* btree block disk address */ int level) /* btree block level */ { XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, level > 0 && bno != NULLFSBLOCK && XFS_FSB_SANITY_CHECK(cur->bc_mp, bno)); return 0; } #ifdef DEBUG /* * Check that (short) pointer is ok. */ STATIC int /* error (0 or EFSCORRUPTED) */ xfs_btree_check_sptr( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agblock_t bno, /* btree block disk address */ int level) /* btree block level */ { xfs_agblock_t agblocks = cur->bc_mp->m_sb.sb_agblocks; XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, level > 0 && bno != NULLAGBLOCK && bno != 0 && bno < agblocks); return 0; } /* * Check that block ptr is ok. */ STATIC int /* error (0 or EFSCORRUPTED) */ xfs_btree_check_ptr( struct xfs_btree_cur *cur, /* btree cursor */ union xfs_btree_ptr *ptr, /* btree block disk address */ int index, /* offset from ptr to check */ int level) /* btree block level */ { if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { return xfs_btree_check_lptr(cur, be64_to_cpu((&ptr->l)[index]), level); } else { return xfs_btree_check_sptr(cur, be32_to_cpu((&ptr->s)[index]), level); } } #endif /* * Calculate CRC on the whole btree block and stuff it into the * long-form btree header. * * Prior to calculting the CRC, pull the LSN out of the buffer log item and put * it into the buffer so recovery knows what the last modification was that made * it to disk. 
*/ void xfs_btree_lblock_calc_crc( struct xfs_buf *bp) { struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); struct xfs_buf_log_item *bip = bp->b_fspriv; if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb)) return; if (bip) block->bb_u.l.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn); xfs_buf_update_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF); } bool xfs_btree_lblock_verify_crc( struct xfs_buf *bp) { struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); struct xfs_mount *mp = bp->b_target->bt_mount; if (xfs_sb_version_hascrc(&mp->m_sb)) { if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.l.bb_lsn))) return false; return xfs_buf_verify_cksum(bp, XFS_BTREE_LBLOCK_CRC_OFF); } return true; } /* * Calculate CRC on the whole btree block and stuff it into the * short-form btree header. * * Prior to calculting the CRC, pull the LSN out of the buffer log item and put * it into the buffer so recovery knows what the last modification was that made * it to disk. */ void xfs_btree_sblock_calc_crc( struct xfs_buf *bp) { struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); struct xfs_buf_log_item *bip = bp->b_fspriv; if (!xfs_sb_version_hascrc(&bp->b_target->bt_mount->m_sb)) return; if (bip) block->bb_u.s.bb_lsn = cpu_to_be64(bip->bli_item.li_lsn); xfs_buf_update_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF); } bool xfs_btree_sblock_verify_crc( struct xfs_buf *bp) { struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); struct xfs_mount *mp = bp->b_target->bt_mount; if (xfs_sb_version_hascrc(&mp->m_sb)) { if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn))) return false; return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF); } return true; } static int xfs_btree_free_block( struct xfs_btree_cur *cur, struct xfs_buf *bp) { int error; error = cur->bc_ops->free_block(cur, bp); if (!error) { xfs_trans_binval(cur->bc_tp, bp); XFS_BTREE_STATS_INC(cur, free); } return error; } /* * Delete the btree cursor. 
*/ void xfs_btree_del_cursor( xfs_btree_cur_t *cur, /* btree cursor */ int error) /* del because of error */ { int i; /* btree level */ /* * Clear the buffer pointers, and release the buffers. * If we're doing this in the face of an error, we * need to make sure to inspect all of the entries * in the bc_bufs array for buffers to be unlocked. * This is because some of the btree code works from * level n down to 0, and if we get an error along * the way we won't have initialized all the entries * down to 0. */ for (i = 0; i < cur->bc_nlevels; i++) { if (cur->bc_bufs[i]) xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]); else if (!error) break; } /* * Can't free a bmap cursor without having dealt with the * allocated indirect blocks' accounting. */ ASSERT(cur->bc_btnum != XFS_BTNUM_BMAP || cur->bc_private.b.allocated == 0); /* * Free the cursor. */ kmem_zone_free(xfs_btree_cur_zone, cur); } /* * Duplicate the btree cursor. * Allocate a new one, copy the record, re-get the buffers. */ int /* error */ xfs_btree_dup_cursor( xfs_btree_cur_t *cur, /* input cursor */ xfs_btree_cur_t **ncur) /* output cursor */ { xfs_buf_t *bp; /* btree block's buffer pointer */ int error; /* error return value */ int i; /* level number of btree block */ xfs_mount_t *mp; /* mount structure for filesystem */ xfs_btree_cur_t *new; /* new cursor value */ xfs_trans_t *tp; /* transaction pointer, can be NULL */ tp = cur->bc_tp; mp = cur->bc_mp; /* * Allocate a new cursor like the old one. */ new = cur->bc_ops->dup_cursor(cur); /* * Copy the record currently in the cursor. */ new->bc_rec = cur->bc_rec; /* * For each level current, re-get the buffer and copy the ptr value. 
*/ for (i = 0; i < new->bc_nlevels; i++) { new->bc_ptrs[i] = cur->bc_ptrs[i]; new->bc_ra[i] = cur->bc_ra[i]; bp = cur->bc_bufs[i]; if (bp) { error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, XFS_BUF_ADDR(bp), mp->m_bsize, 0, &bp, cur->bc_ops->buf_ops); if (error) { xfs_btree_del_cursor(new, error); *ncur = NULL; return error; } } new->bc_bufs[i] = bp; } *ncur = new; return 0; } /* * XFS btree block layout and addressing: * * There are two types of blocks in the btree: leaf and non-leaf blocks. * * The leaf record start with a header then followed by records containing * the values. A non-leaf block also starts with the same header, and * then first contains lookup keys followed by an equal number of pointers * to the btree blocks at the previous level. * * +--------+-------+-------+-------+-------+-------+-------+ * Leaf: | header | rec 1 | rec 2 | rec 3 | rec 4 | rec 5 | rec N | * +--------+-------+-------+-------+-------+-------+-------+ * * +--------+-------+-------+-------+-------+-------+-------+ * Non-Leaf: | header | key 1 | key 2 | key N | ptr 1 | ptr 2 | ptr N | * +--------+-------+-------+-------+-------+-------+-------+ * * The header is called struct xfs_btree_block for reasons better left unknown * and comes in different versions for short (32bit) and long (64bit) block * pointers. The record and key structures are defined by the btree instances * and opaque to the btree core. The block pointers are simple disk endian * integers, available in a short (32bit) and long (64bit) variant. * * The helpers below calculate the offset of a given record, key or pointer * into a btree block (xfs_btree_*_offset) or return a pointer to the given * record, key or pointer (xfs_btree_*_addr). Note that all addressing * inside the btree block is done using indices starting at one, not zero! * * If XFS_BTREE_OVERLAPPING is set, then this btree supports keys containing * overlapping intervals. 
In such a tree, records are still sorted lowest to * highest and indexed by the smallest key value that refers to the record. * However, nodes are different: each pointer has two associated keys -- one * indexing the lowest key available in the block(s) below (the same behavior * as the key in a regular btree) and another indexing the highest key * available in the block(s) below. Because records are /not/ sorted by the * highest key, all leaf block updates require us to compute the highest key * that matches any record in the leaf and to recursively update the high keys * in the nodes going further up in the tree, if necessary. Nodes look like * this: * * +--------+-----+-----+-----+-----+-----+-------+-------+-----+ * Non-Leaf: | header | lo1 | hi1 | lo2 | hi2 | ... | ptr 1 | ptr 2 | ... | * +--------+-----+-----+-----+-----+-----+-------+-------+-----+ * * To perform an interval query on an overlapped tree, perform the usual * depth-first search and use the low and high keys to decide if we can skip * that particular node. If a leaf node is reached, return the records that * intersect the interval. Note that an interval query may return numerous * entries. For a non-overlapped tree, simply search for the record associated * with the lowest key and iterate forward until a non-matching record is * found. Section 14.3 ("Interval Trees") of _Introduction to Algorithms_ by * Cormen, Leiserson, Rivest, and Stein (2nd or 3rd ed. only) discuss this in * more detail. * * Why do we care about overlapping intervals? Let's say you have a bunch of * reverse mapping records on a reflink filesystem: * * 1: +- file A startblock B offset C length D -----------+ * 2: +- file E startblock F offset G length H --------------+ * 3: +- file I startblock F offset J length K --+ * 4: +- file L... --+ * * Now say we want to map block (B+D) into file A at offset (C+D). Ideally, * we'd simply increment the length of record 1. But how do we find the record * that ends at (B+D-1) (i.e. 
record 1)? A LE lookup of (B+D-1) would return * record 3 because the keys are ordered first by startblock. An interval * query would return records 1 and 2 because they both overlap (B+D-1), and * from that we can pick out record 1 as the appropriate left neighbor. * * In the non-overlapped case you can do a LE lookup and decrement the cursor * because a record's interval must end before the next record. */ /* * Return size of the btree block header for this btree instance. */ static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur) { if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) return XFS_BTREE_LBLOCK_CRC_LEN; return XFS_BTREE_LBLOCK_LEN; } if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) return XFS_BTREE_SBLOCK_CRC_LEN; return XFS_BTREE_SBLOCK_LEN; } /* * Return size of btree block pointers for this btree instance. */ static inline size_t xfs_btree_ptr_len(struct xfs_btree_cur *cur) { return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ? sizeof(__be64) : sizeof(__be32); } /* * Calculate offset of the n-th record in a btree block. */ STATIC size_t xfs_btree_rec_offset( struct xfs_btree_cur *cur, int n) { return xfs_btree_block_len(cur) + (n - 1) * cur->bc_ops->rec_len; } /* * Calculate offset of the n-th key in a btree block. */ STATIC size_t xfs_btree_key_offset( struct xfs_btree_cur *cur, int n) { return xfs_btree_block_len(cur) + (n - 1) * cur->bc_ops->key_len; } /* * Calculate offset of the n-th high key in a btree block. */ STATIC size_t xfs_btree_high_key_offset( struct xfs_btree_cur *cur, int n) { return xfs_btree_block_len(cur) + (n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2); } /* * Calculate offset of the n-th block pointer in a btree block. 
*/ STATIC size_t xfs_btree_ptr_offset( struct xfs_btree_cur *cur, int n, int level) { return xfs_btree_block_len(cur) + cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len + (n - 1) * xfs_btree_ptr_len(cur); } /* * Return a pointer to the n-th record in the btree block. */ STATIC union xfs_btree_rec * xfs_btree_rec_addr( struct xfs_btree_cur *cur, int n, struct xfs_btree_block *block) { return (union xfs_btree_rec *) ((char *)block + xfs_btree_rec_offset(cur, n)); } /* * Return a pointer to the n-th key in the btree block. */ STATIC union xfs_btree_key * xfs_btree_key_addr( struct xfs_btree_cur *cur, int n, struct xfs_btree_block *block) { return (union xfs_btree_key *) ((char *)block + xfs_btree_key_offset(cur, n)); } /* * Return a pointer to the n-th high key in the btree block. */ STATIC union xfs_btree_key * xfs_btree_high_key_addr( struct xfs_btree_cur *cur, int n, struct xfs_btree_block *block) { return (union xfs_btree_key *) ((char *)block + xfs_btree_high_key_offset(cur, n)); } /* * Return a pointer to the n-th block pointer in the btree block. */ STATIC union xfs_btree_ptr * xfs_btree_ptr_addr( struct xfs_btree_cur *cur, int n, struct xfs_btree_block *block) { int level = xfs_btree_get_level(block); ASSERT(block->bb_level != 0); return (union xfs_btree_ptr *) ((char *)block + xfs_btree_ptr_offset(cur, n, level)); } /* * Get the root block which is stored in the inode. * * For now this btree implementation assumes the btree root is always * stored in the if_broot field of an inode fork. */ STATIC struct xfs_btree_block * xfs_btree_get_iroot( struct xfs_btree_cur *cur) { struct xfs_ifork *ifp; ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork); return (struct xfs_btree_block *)ifp->if_broot; } /* * Retrieve the block pointer from the cursor at the given level. * This may be an inode btree root or from a buffer. 
*/ STATIC struct xfs_btree_block * /* generic btree block pointer */ xfs_btree_get_block( struct xfs_btree_cur *cur, /* btree cursor */ int level, /* level in btree */ struct xfs_buf **bpp) /* buffer containing the block */ { if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && (level == cur->bc_nlevels - 1)) { *bpp = NULL; return xfs_btree_get_iroot(cur); } *bpp = cur->bc_bufs[level]; return XFS_BUF_TO_BLOCK(*bpp); } /* * Get a buffer for the block, return it with no data read. * Long-form addressing. */ xfs_buf_t * /* buffer for fsbno */ xfs_btree_get_bufl( xfs_mount_t *mp, /* file system mount point */ xfs_trans_t *tp, /* transaction pointer */ xfs_fsblock_t fsbno, /* file system block number */ uint lock) /* lock flags for get_buf */ { xfs_daddr_t d; /* real disk block address */ ASSERT(fsbno != NULLFSBLOCK); d = XFS_FSB_TO_DADDR(mp, fsbno); return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock); } /* * Get a buffer for the block, return it with no data read. * Short-form addressing. */ xfs_buf_t * /* buffer for agno/agbno */ xfs_btree_get_bufs( xfs_mount_t *mp, /* file system mount point */ xfs_trans_t *tp, /* transaction pointer */ xfs_agnumber_t agno, /* allocation group number */ xfs_agblock_t agbno, /* allocation group block number */ uint lock) /* lock flags for get_buf */ { xfs_daddr_t d; /* real disk block address */ ASSERT(agno != NULLAGNUMBER); ASSERT(agbno != NULLAGBLOCK); d = XFS_AGB_TO_DADDR(mp, agno, agbno); return xfs_trans_get_buf(tp, mp->m_ddev_targp, d, mp->m_bsize, lock); } /* * Check for the cursor referring to the last block at the given level. 
*/ int /* 1=is last block, 0=not last block */ xfs_btree_islastblock( xfs_btree_cur_t *cur, /* btree cursor */ int level) /* level to check */ { struct xfs_btree_block *block; /* generic btree block pointer */ xfs_buf_t *bp; /* buffer containing block */ block = xfs_btree_get_block(cur, level, &bp); xfs_btree_check_block(cur, block, level, bp); if (cur->bc_flags & XFS_BTREE_LONG_PTRS) return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK); else return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK); } /* * Change the cursor to point to the first record at the given level. * Other levels are unaffected. */ STATIC int /* success=1, failure=0 */ xfs_btree_firstrec( xfs_btree_cur_t *cur, /* btree cursor */ int level) /* level to change */ { struct xfs_btree_block *block; /* generic btree block pointer */ xfs_buf_t *bp; /* buffer containing block */ /* * Get the block pointer for this level. */ block = xfs_btree_get_block(cur, level, &bp); xfs_btree_check_block(cur, block, level, bp); /* * It's empty, there is no such record. */ if (!block->bb_numrecs) return 0; /* * Set the ptr value to 1, that's the first record/key. */ cur->bc_ptrs[level] = 1; return 1; } /* * Change the cursor to point to the last record in the current block * at the given level. Other levels are unaffected. */ STATIC int /* success=1, failure=0 */ xfs_btree_lastrec( xfs_btree_cur_t *cur, /* btree cursor */ int level) /* level to change */ { struct xfs_btree_block *block; /* generic btree block pointer */ xfs_buf_t *bp; /* buffer containing block */ /* * Get the block pointer for this level. */ block = xfs_btree_get_block(cur, level, &bp); xfs_btree_check_block(cur, block, level, bp); /* * It's empty, there is no such record. */ if (!block->bb_numrecs) return 0; /* * Set the ptr value to numrecs, that's the last record/key. */ cur->bc_ptrs[level] = be16_to_cpu(block->bb_numrecs); return 1; } /* * Compute first and last byte offsets for the fields given. 
* Interprets the offsets table, which contains struct field offsets. */ void xfs_btree_offsets( __int64_t fields, /* bitmask of fields */ const short *offsets, /* table of field offsets */ int nbits, /* number of bits to inspect */ int *first, /* output: first byte offset */ int *last) /* output: last byte offset */ { int i; /* current bit number */ __int64_t imask; /* mask for current bit number */ ASSERT(fields != 0); /* * Find the lowest bit, so the first byte offset. */ for (i = 0, imask = 1LL; ; i++, imask <<= 1) { if (imask & fields) { *first = offsets[i]; break; } } /* * Find the highest bit, so the last byte offset. */ for (i = nbits - 1, imask = 1LL << i; ; i--, imask >>= 1) { if (imask & fields) { *last = offsets[i + 1] - 1; break; } } } /* * Get a buffer for the block, return it read in. * Long-form addressing. */ int xfs_btree_read_bufl( struct xfs_mount *mp, /* file system mount point */ struct xfs_trans *tp, /* transaction pointer */ xfs_fsblock_t fsbno, /* file system block number */ uint lock, /* lock flags for read_buf */ struct xfs_buf **bpp, /* buffer for fsbno */ int refval, /* ref count value for buffer */ const struct xfs_buf_ops *ops) { struct xfs_buf *bp; /* return value */ xfs_daddr_t d; /* real disk block address */ int error; ASSERT(fsbno != NULLFSBLOCK); d = XFS_FSB_TO_DADDR(mp, fsbno); error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d, mp->m_bsize, lock, &bp, ops); if (error) return error; if (bp) xfs_buf_set_ref(bp, refval); *bpp = bp; return 0; } /* * Read-ahead the block, don't wait for it, don't return a buffer. * Long-form addressing. 
 */
/* ARGSUSED */
void
xfs_btree_reada_bufl(
	struct xfs_mount	*mp,		/* file system mount point */
	xfs_fsblock_t		fsbno,		/* file system block number */
	xfs_extlen_t		count,		/* count of filesystem blocks */
	const struct xfs_buf_ops *ops)
{
	xfs_daddr_t		d;

	ASSERT(fsbno != NULLFSBLOCK);
	d = XFS_FSB_TO_DADDR(mp, fsbno);
	xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
}

/*
 * Read-ahead the block, don't wait for it, don't return a buffer.
 * Short-form addressing.
 */
/* ARGSUSED */
void
xfs_btree_reada_bufs(
	struct xfs_mount	*mp,		/* file system mount point */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_agblock_t		agbno,		/* allocation group block number */
	xfs_extlen_t		count,		/* count of filesystem blocks */
	const struct xfs_buf_ops *ops)
{
	xfs_daddr_t		d;

	ASSERT(agno != NULLAGNUMBER);
	ASSERT(agbno != NULLAGBLOCK);
	d = XFS_AGB_TO_DADDR(mp, agno, agbno);
	xfs_buf_readahead(mp->m_ddev_targp, d, mp->m_bsize * count, ops);
}

/*
 * Start readahead of the left and/or right siblings of a long-format
 * btree block, as directed by the XFS_BTCUR_{LEFT,RIGHT}RA bits in lr.
 * Returns the number of readaheads issued.
 */
STATIC int
xfs_btree_readahead_lblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{
	int			rval = 0;
	xfs_fsblock_t		left = be64_to_cpu(block->bb_u.l.bb_leftsib);
	xfs_fsblock_t		right = be64_to_cpu(block->bb_u.l.bb_rightsib);

	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLFSBLOCK) {
		xfs_btree_reada_bufl(cur->bc_mp, left, 1,
				     cur->bc_ops->buf_ops);
		rval++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLFSBLOCK) {
		xfs_btree_reada_bufl(cur->bc_mp, right, 1,
				     cur->bc_ops->buf_ops);
		rval++;
	}

	return rval;
}

/*
 * As above, but for short-format (AG-relative) sibling pointers.
 */
STATIC int
xfs_btree_readahead_sblock(
	struct xfs_btree_cur	*cur,
	int			lr,
	struct xfs_btree_block	*block)
{
	int			rval = 0;
	xfs_agblock_t		left = be32_to_cpu(block->bb_u.s.bb_leftsib);
	xfs_agblock_t		right = be32_to_cpu(block->bb_u.s.bb_rightsib);

	if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) {
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
				     left, 1, cur->bc_ops->buf_ops);
		rval++;
	}

	if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) {
		xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno,
				     right, 1, cur->bc_ops->buf_ops);
		rval++;
	}

	return rval;
}

/*
 * Read-ahead btree blocks, at the given level.
 * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA.
 */
STATIC int
xfs_btree_readahead(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			lev,		/* level in btree */
	int			lr)		/* left/right bits */
{
	struct xfs_btree_block	*block;

	/*
	 * No readahead needed if we are at the root level and the
	 * btree root is stored in the inode.
	 */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (lev == cur->bc_nlevels - 1))
		return 0;

	/* Skip directions we have already read ahead at this level. */
	if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev])
		return 0;

	cur->bc_ra[lev] |= lr;
	block = XFS_BUF_TO_BLOCK(cur->bc_bufs[lev]);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return xfs_btree_readahead_lblock(cur, lr, block);
	return xfs_btree_readahead_sblock(cur, lr, block);
}

/*
 * Convert a btree block pointer (long or short form, per the cursor's
 * flags) into a disk address.
 */
STATIC xfs_daddr_t
xfs_btree_ptr_to_daddr(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		ASSERT(ptr->l != cpu_to_be64(NULLFSBLOCK));

		return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l));
	} else {
		ASSERT(cur->bc_private.a.agno != NULLAGNUMBER);
		ASSERT(ptr->s != cpu_to_be32(NULLAGBLOCK));

		return XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno,
					be32_to_cpu(ptr->s));
	}
}

/*
 * Readahead @count btree blocks at the given @ptr location.
 *
 * We don't need to care about long or short form btrees here as we have a
 * method of converting the ptr directly to a daddr available to us.
 */
STATIC void
xfs_btree_readahead_ptr(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	xfs_extlen_t		count)
{
	xfs_buf_readahead(cur->bc_mp->m_ddev_targp,
			  xfs_btree_ptr_to_daddr(cur, ptr),
			  cur->bc_mp->m_bsize * count, cur->bc_ops->buf_ops);
}

/*
 * Set the buffer for level "lev" in the cursor to bp, releasing
 * any previous buffer.
 */
STATIC void
xfs_btree_setbuf(
	xfs_btree_cur_t		*cur,		/* btree cursor */
	int			lev,		/* level in btree */
	xfs_buf_t		*bp)		/* new buffer to set */
{
	struct xfs_btree_block	*b;		/* btree block */

	if (cur->bc_bufs[lev])
		xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[lev]);
	cur->bc_bufs[lev] = bp;
	cur->bc_ra[lev] = 0;

	/*
	 * If a sibling pointer is null there is nothing to read ahead in
	 * that direction; pre-mark it done so xfs_btree_readahead skips it.
	 */
	b = XFS_BUF_TO_BLOCK(bp);
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
		if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
	} else {
		if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
		if (b->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
			cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
	}
}

/*
 * Return true if the given block pointer holds the null sentinel for
 * this btree's pointer format.
 */
STATIC int
xfs_btree_ptr_is_null(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		return ptr->l == cpu_to_be64(NULLFSBLOCK);
	else
		return ptr->s == cpu_to_be32(NULLAGBLOCK);
}

/*
 * Set the given block pointer to the null sentinel for this btree's
 * pointer format.
 */
STATIC void
xfs_btree_set_ptr_null(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		ptr->l = cpu_to_be64(NULLFSBLOCK);
	else
		ptr->s = cpu_to_be32(NULLAGBLOCK);
}

/*
 * Get/set/init sibling pointers
 */
STATIC void
xfs_btree_get_sibling(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_ptr	*ptr,
	int			lr)	/* XFS_BB_LEFTSIB or XFS_BB_RIGHTSIB */
{
	ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);

	/* Sibling pointers stay in on-disk (big-endian) form here. */
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (lr == XFS_BB_RIGHTSIB)
			ptr->l = block->bb_u.l.bb_rightsib;
		else
			ptr->l = block->bb_u.l.bb_leftsib;
	} else {
		if (lr == XFS_BB_RIGHTSIB)
			ptr->s = block->bb_u.s.bb_rightsib;
		else
			ptr->s = block->bb_u.s.bb_leftsib;
	}
}

STATIC void
xfs_btree_set_sibling(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_ptr	*ptr,
	int			lr)	/* XFS_BB_LEFTSIB or XFS_BB_RIGHTSIB */
{
	ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);

	if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
		if (lr == XFS_BB_RIGHTSIB)
			block->bb_u.l.bb_rightsib = ptr->l;
		else
			block->bb_u.l.bb_leftsib = ptr->l;
	} else {
		if (lr == XFS_BB_RIGHTSIB)
			block->bb_u.s.bb_rightsib = ptr->s;
		else
			block->bb_u.s.bb_leftsib = ptr->s;
	}
}

/*
 * Initialize an on-disk btree block header.  The blkno/owner/uuid/lsn
 * fields are only filled in for CRC-enabled blocks
 * (XFS_BTREE_CRC_BLOCKS in flags).
 */
void
xfs_btree_init_block_int(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*buf,
	xfs_daddr_t		blkno,
	__u32			magic,
	__u16			level,
	__u16			numrecs,
	__u64			owner,
	unsigned int		flags)
{
	buf->bb_magic = cpu_to_be32(magic);
	buf->bb_level = cpu_to_be16(level);
	buf->bb_numrecs = cpu_to_be16(numrecs);

	if (flags & XFS_BTREE_LONG_PTRS) {
		buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK);
		buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK);
		if (flags & XFS_BTREE_CRC_BLOCKS) {
			buf->bb_u.l.bb_blkno = cpu_to_be64(blkno);
			buf->bb_u.l.bb_owner = cpu_to_be64(owner);
			uuid_copy(&buf->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid);
			buf->bb_u.l.bb_pad = 0;
			buf->bb_u.l.bb_lsn = 0;
		}
	} else {
		/* owner is a 32 bit value on short blocks */
		__u32 __owner = (__u32)owner;

		buf->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
		buf->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
		if (flags & XFS_BTREE_CRC_BLOCKS) {
			buf->bb_u.s.bb_blkno = cpu_to_be64(blkno);
			buf->bb_u.s.bb_owner = cpu_to_be32(__owner);
			uuid_copy(&buf->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid);
			buf->bb_u.s.bb_lsn = 0;
		}
	}
}

/* Convenience wrapper: initialize the block held in buffer bp. */
void
xfs_btree_init_block(
	struct xfs_mount *mp,
	struct xfs_buf	*bp,
	__u32		magic,
	__u16		level,
	__u16		numrecs,
	__u64		owner,
	unsigned int	flags)
{
	xfs_btree_init_block_int(mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
				 magic, level, numrecs, owner, flags);
}

STATIC void
xfs_btree_init_block_cur(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			level,
	int			numrecs)
{
	__u64			owner;

	/*
	 * we can pull the owner from the cursor right now as the different
	 * owners align directly with the pointer size of the btree. This may
	 * change in future, but is safe for current users of the generic btree
	 * code.
 */
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		owner = cur->bc_private.b.ip->i_ino;
	else
		owner = cur->bc_private.a.agno;

	xfs_btree_init_block_int(cur->bc_mp, XFS_BUF_TO_BLOCK(bp), bp->b_bn,
				 xfs_btree_magic(cur), level, numrecs,
				 owner, cur->bc_flags);
}

/*
 * Return true if ptr is the last record in the btree and
 * we need to track updates to this record.  The decision
 * will be further refined in the update_lastrec method.
 */
STATIC int
xfs_btree_is_lastrec(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	int			level)
{
	union xfs_btree_ptr	ptr;

	/* Only leaf records qualify, and only for LASTREC_UPDATE btrees. */
	if (level > 0)
		return 0;
	if (!(cur->bc_flags & XFS_BTREE_LASTREC_UPDATE))
		return 0;

	/* Not the last block unless the right sibling pointer is null. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	if (!xfs_btree_ptr_is_null(cur, &ptr))
		return 0;
	return 1;
}

/*
 * Convert a buffer's disk address into a btree block pointer of the
 * cursor's pointer format.
 */
STATIC void
xfs_btree_buf_to_ptr(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	union xfs_btree_ptr	*ptr)
{
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
					XFS_BUF_ADDR(bp)));
	else {
		ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp,
					XFS_BUF_ADDR(bp)));
	}
}

/*
 * Set the buffer's LRU reference count according to the btree type.
 */
STATIC void
xfs_btree_set_refs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	switch (cur->bc_btnum) {
	case XFS_BTNUM_BNO:
	case XFS_BTNUM_CNT:
		xfs_buf_set_ref(bp, XFS_ALLOC_BTREE_REF);
		break;
	case XFS_BTNUM_INO:
	case XFS_BTNUM_FINO:
		xfs_buf_set_ref(bp, XFS_INO_BTREE_REF);
		break;
	case XFS_BTNUM_BMAP:
		xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
		break;
	case XFS_BTNUM_RMAP:
		xfs_buf_set_ref(bp, XFS_RMAP_BTREE_REF);
		break;
	default:
		ASSERT(0);
	}
}

/*
 * Get a new buffer for the block at ptr (no disk read) and return the
 * buffer and the in-core block header within it.
 */
STATIC int
xfs_btree_get_buf_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			flags,
	struct xfs_btree_block	**block,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_daddr_t		d;

	/* need to sort out how callers deal with failures first */
	ASSERT(!(flags & XBF_TRYLOCK));

	d = xfs_btree_ptr_to_daddr(cur, ptr);
	*bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
				 mp->m_bsize, flags);

	if (!*bpp)
		return -ENOMEM;

	(*bpp)->b_ops = cur->bc_ops->buf_ops;
	*block = XFS_BUF_TO_BLOCK(*bpp);
	return 0;
}

/*
 * Read in the buffer at the given ptr and return the buffer and
 * the block pointer within the buffer.
 */
STATIC int
xfs_btree_read_buf_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			flags,
	struct xfs_btree_block	**block,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_daddr_t		d;
	int			error;

	/* need to sort out how callers deal with failures first */
	ASSERT(!(flags & XBF_TRYLOCK));

	d = xfs_btree_ptr_to_daddr(cur, ptr);
	error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
				   mp->m_bsize, flags, bpp,
				   cur->bc_ops->buf_ops);
	if (error)
		return error;

	xfs_btree_set_refs(cur, *bpp);
	*block = XFS_BUF_TO_BLOCK(*bpp);
	return 0;
}

/*
 * Copy keys from one btree block to another.
 */
STATIC void
xfs_btree_copy_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*dst_key,
	union xfs_btree_key	*src_key,
	int			numkeys)
{
	ASSERT(numkeys >= 0);
	memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len);
}

/*
 * Copy records from one btree block to another.
 */
STATIC void
xfs_btree_copy_recs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*dst_rec,
	union xfs_btree_rec	*src_rec,
	int			numrecs)
{
	ASSERT(numrecs >= 0);
	memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len);
}

/*
 * Copy block pointers from one btree block to another.
 */
STATIC void
xfs_btree_copy_ptrs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*dst_ptr,
	union xfs_btree_ptr	*src_ptr,
	int			numptrs)
{
	ASSERT(numptrs >= 0);
	memcpy(dst_ptr, src_ptr, numptrs * xfs_btree_ptr_len(cur));
}

/*
 * Shift keys one index left/right inside a single btree block.
 * dir is +1 (right) or -1 (left); uses memmove as the ranges overlap.
 */
STATIC void
xfs_btree_shift_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key,
	int			dir,
	int			numkeys)
{
	char			*dst_key;

	ASSERT(numkeys >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst_key = (char *)key + (dir * cur->bc_ops->key_len);
	memmove(dst_key, key, numkeys * cur->bc_ops->key_len);
}

/*
 * Shift records one index left/right inside a single btree block.
 */
STATIC void
xfs_btree_shift_recs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	int			dir,	/* +1 right, -1 left */
	int			numrecs)
{
	char			*dst_rec;

	ASSERT(numrecs >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len);
	memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len);
}

/*
 * Shift block pointers one index left/right inside a single btree block.
 */
STATIC void
xfs_btree_shift_ptrs(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			dir,	/* +1 right, -1 left */
	int			numptrs)
{
	char			*dst_ptr;

	ASSERT(numptrs >= 0);
	ASSERT(dir == 1 || dir == -1);

	dst_ptr = (char *)ptr + (dir * xfs_btree_ptr_len(cur));
	memmove(dst_ptr, ptr, numptrs * xfs_btree_ptr_len(cur));
}

/*
 * Log key values from the btree block.
 */
STATIC void
xfs_btree_log_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,	/* index of first key to log */
	int			last)	/* index of last key to log */
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	/* A null bp means the keys live in the inode-rooted btree root. */
	if (bp) {
		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp,
				  xfs_btree_key_offset(cur, first),
				  xfs_btree_key_offset(cur, last + 1) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
				xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}

/*
 * Log record values from the btree block.
 */
void
xfs_btree_log_recs(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	int			first,	/* index of first record to log */
	int			last)	/* index of last record to log */
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
	xfs_trans_log_buf(cur->bc_tp, bp,
			  xfs_btree_rec_offset(cur, first),
			  xfs_btree_rec_offset(cur, last + 1) - 1);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}

/*
 * Log block pointer fields from a btree block (nonleaf).
 */
STATIC void
xfs_btree_log_ptrs(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing btree block */
	int			first,	/* index of first pointer to log */
	int			last)	/* index of last pointer to log */
{
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBII(cur, bp, first, last);

	/* A null bp means the pointers live in the inode-rooted btree root. */
	if (bp) {
		struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
		int			level = xfs_btree_get_level(block);

		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp,
				xfs_btree_ptr_offset(cur, first, level),
				xfs_btree_ptr_offset(cur, last + 1, level) - 1);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
			xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}

/*
 * Log fields from a btree block header.
 */
void
xfs_btree_log_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_buf		*bp,	/* buffer containing btree block */
	int			fields)	/* mask of fields: XFS_BB_... */
{
	int			first;	/* first byte offset logged */
	int			last;	/* last byte offset logged */
	/*
	 * Offset tables consumed by xfs_btree_offsets; note the final
	 * entry in each is the total header length, required because
	 * xfs_btree_offsets reads one entry past the highest set bit.
	 */
	static const short	soffsets[] = {	/* table of offsets (short) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.s.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.s.bb_rightsib),
		offsetof(struct xfs_btree_block, bb_u.s.bb_blkno),
		offsetof(struct xfs_btree_block, bb_u.s.bb_lsn),
		offsetof(struct xfs_btree_block, bb_u.s.bb_uuid),
		offsetof(struct xfs_btree_block, bb_u.s.bb_owner),
		offsetof(struct xfs_btree_block, bb_u.s.bb_crc),
		XFS_BTREE_SBLOCK_CRC_LEN
	};
	static const short	loffsets[] = {	/* table of offsets (long) */
		offsetof(struct xfs_btree_block, bb_magic),
		offsetof(struct xfs_btree_block, bb_level),
		offsetof(struct xfs_btree_block, bb_numrecs),
		offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib),
		offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib),
		offsetof(struct xfs_btree_block, bb_u.l.bb_blkno),
		offsetof(struct xfs_btree_block, bb_u.l.bb_lsn),
		offsetof(struct xfs_btree_block, bb_u.l.bb_uuid),
		offsetof(struct xfs_btree_block, bb_u.l.bb_owner),
		offsetof(struct xfs_btree_block, bb_u.l.bb_crc),
		offsetof(struct xfs_btree_block, bb_u.l.bb_pad),
		XFS_BTREE_LBLOCK_CRC_LEN
	};

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGBI(cur, bp, fields);

	if (bp) {
		int nbits;

		if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
			/*
			 * We don't log the CRC when updating a btree
			 * block but instead recreate it during log
			 * recovery.  As the log buffers have checksums
			 * of their own this is safe and avoids logging a crc
			 * update in a lot of places.
			 */
			if (fields == XFS_BB_ALL_BITS)
				fields = XFS_BB_ALL_BITS_CRC;
			nbits = XFS_BB_NUM_BITS_CRC;
		} else {
			nbits = XFS_BB_NUM_BITS;
		}
		xfs_btree_offsets(fields,
				  (cur->bc_flags & XFS_BTREE_LONG_PTRS) ?
					loffsets : soffsets,
				  nbits, &first, &last);
		xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF);
		xfs_trans_log_buf(cur->bc_tp, bp, first, last);
	} else {
		xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
			xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
}

/*
 * Increment cursor by one record at the level.
 * For nonzero levels the leaf-ward information is untouched.
 */
int						/* error */
xfs_btree_increment(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_block	*block;
	union xfs_btree_ptr	ptr;
	struct xfs_buf		*bp;
	int			error;		/* error return value */
	int			lev;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the right at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* We're done if we remain in the block after the increment. */
	if (++cur->bc_ptrs[level] <= xfs_btree_get_numrecs(block))
		goto out1;

	/* Fail if we just went off the right edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, increment);

	/*
	 * March up the tree incrementing pointers.
	 * Stop when we don't go off the right edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		block = xfs_btree_get_block(cur, lev, &bp);

#ifdef DEBUG
		error = xfs_btree_check_block(cur, block, lev, bp);
		if (error)
			goto error0;
#endif

		if (++cur->bc_ptrs[lev] <= xfs_btree_get_numrecs(block))
			break;

		/* Read-ahead the right block for the next loop. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA);
	}

	/*
	 * If we went off the root then we are either seriously
	 * confused or have the tree root in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			goto out0;
		ASSERT(0);
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers.  Each descent lands on entry 1 of
	 * the child, i.e. the leftmost path under the new parent slot.
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
		--lev;
		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
		if (error)
			goto error0;

		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_ptrs[lev] = 1;
	}
out1:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}

/*
 * Decrement cursor by one record at the level.
 * For nonzero levels the leaf-ward information is untouched.
 */
int						/* error */
xfs_btree_decrement(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_block	*block;
	xfs_buf_t		*bp;
	int			error;		/* error return value */
	int			lev;
	union xfs_btree_ptr	ptr;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	ASSERT(level < cur->bc_nlevels);

	/* Read-ahead to the left at this level. */
	xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA);

	/* We're done if we remain in the block after the decrement. */
	if (--cur->bc_ptrs[level] > 0)
		goto out1;

	/* Get a pointer to the btree block. */
	block = xfs_btree_get_block(cur, level, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* Fail if we just went off the left edge of the tree. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
	if (xfs_btree_ptr_is_null(cur, &ptr))
		goto out0;

	XFS_BTREE_STATS_INC(cur, decrement);

	/*
	 * March up the tree decrementing pointers.
	 * Stop when we don't go off the left edge of a block.
	 */
	for (lev = level + 1; lev < cur->bc_nlevels; lev++) {
		if (--cur->bc_ptrs[lev] > 0)
			break;
		/* Read-ahead the left block for the next loop. */
		xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA);
	}

	/*
	 * If we went off the root then we are seriously confused.
	 * or the root of the tree is in an inode.
	 */
	if (lev == cur->bc_nlevels) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE)
			goto out0;
		ASSERT(0);
		error = -EFSCORRUPTED;
		goto error0;
	}
	ASSERT(lev < cur->bc_nlevels);

	/*
	 * Now walk back down the tree, fixing up the cursor's buffer
	 * pointers and key numbers.  Each descent lands on the last
	 * entry of the child, i.e. the rightmost path under the slot.
	 */
	for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) {
		union xfs_btree_ptr	*ptrp;

		ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block);
		--lev;
		error = xfs_btree_read_buf_block(cur, ptrp, 0, &block, &bp);
		if (error)
			goto error0;
		xfs_btree_setbuf(cur, lev, bp);
		cur->bc_ptrs[lev] = xfs_btree_get_numrecs(block);
	}
out1:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}

/*
 * Fetch the block at pp for a lookup at the given level, reusing the
 * buffer the cursor already holds for that level when it matches the
 * wanted disk address.
 */
STATIC int
xfs_btree_lookup_get_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level in the btree */
	union xfs_btree_ptr	*pp,	/* ptr to btree block */
	struct xfs_btree_block	**blkp)	/* return btree block */
{
	struct xfs_buf		*bp;	/* buffer pointer for btree block */
	int			error = 0;

	/* special case the root block if in an inode */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level == cur->bc_nlevels - 1)) {
		*blkp = xfs_btree_get_iroot(cur);
		return 0;
	}

	/*
	 * If the old buffer at this level for the disk address we are
	 * looking for re-use it.
	 *
	 * Otherwise throw it away and get a new one.
	 */
	bp = cur->bc_bufs[level];
	if (bp && XFS_BUF_ADDR(bp) == xfs_btree_ptr_to_daddr(cur, pp)) {
		*blkp = XFS_BUF_TO_BLOCK(bp);
		return 0;
	}

	error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp);
	if (error)
		return error;

	xfs_btree_setbuf(cur, level, bp);
	return 0;
}

/*
 * Get current search key.  For level 0 we don't actually have a key
 * structure so we make one up from the record.  For all other levels
 * we just return the right key.
 */
STATIC union xfs_btree_key *
xfs_lookup_get_search_key(
	struct xfs_btree_cur	*cur,
	int			level,
	int			keyno,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*kp)	/* scratch key storage for level 0 */
{
	if (level == 0) {
		cur->bc_ops->init_key_from_rec(kp,
				xfs_btree_rec_addr(cur, keyno, block));
		return kp;
	}

	return xfs_btree_key_addr(cur, keyno, block);
}

/*
 * Lookup the record.  The cursor is made to point to it, based on dir.
 * stat is set to 0 if can't find any such record, 1 for success.
 */
int					/* error */
xfs_btree_lookup(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_lookup_t		dir,	/* <=, ==, or >= */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* current btree block */
	__int64_t		diff;	/* difference for the current key */
	int			error;	/* error return value */
	int			keyno;	/* current key number */
	int			level;	/* level in the btree */
	union xfs_btree_ptr	*pp;	/* ptr to btree block */
	union xfs_btree_ptr	ptr;	/* ptr to btree block */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, dir);

	XFS_BTREE_STATS_INC(cur, lookup);

	/* No such thing as a zero-level tree. */
	if (cur->bc_nlevels == 0)
		return -EFSCORRUPTED;

	block = NULL;
	keyno = 0;

	/* initialise start pointer from cursor */
	cur->bc_ops->init_ptr_from_cur(cur, &ptr);
	pp = &ptr;

	/*
	 * Iterate over each level in the btree, starting at the root.
	 * For each level above the leaves, find the key we need, based
	 * on the lookup record, then follow the corresponding block
	 * pointer down to the next level.
	 */
	for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) {
		/* Get the block we need to do the lookup on. */
		error = xfs_btree_lookup_get_block(cur, level, pp, &block);
		if (error)
			goto error0;

		if (diff == 0) {
			/*
			 * If we already had a key match at a higher level, we
			 * know we need to use the first entry in this block.
			 */
			keyno = 1;
		} else {
			/* Otherwise search this block. Do a binary search. */

			int	high;	/* high entry number */
			int	low;	/* low entry number */

			/* Set low and high entry numbers, 1-based. */
			low = 1;
			high = xfs_btree_get_numrecs(block);
			if (!high) {
				/* Block is empty, must be an empty leaf. */
				ASSERT(level == 0 && cur->bc_nlevels == 1);

				cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE;
				XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
				*stat = 0;
				return 0;
			}

			/* Binary search the block. */
			while (low <= high) {
				union xfs_btree_key	key;
				union xfs_btree_key	*kp;

				XFS_BTREE_STATS_INC(cur, compare);

				/* keyno is average of low and high. */
				keyno = (low + high) >> 1;

				/* Get current search key */
				kp = xfs_lookup_get_search_key(cur, level,
						keyno, block, &key);

				/*
				 * Compute difference to get next direction:
				 *  - less than, move right
				 *  - greater than, move left
				 *  - equal, we're done
				 */
				diff = cur->bc_ops->key_diff(cur, kp);
				if (diff < 0)
					low = keyno + 1;
				else if (diff > 0)
					high = keyno - 1;
				else
					break;
			}
		}

		/*
		 * If there are more levels, set up for the next level
		 * by getting the block number and filling in the cursor.
		 */
		if (level > 0) {
			/*
			 * If we moved left, need the previous key number,
			 * unless there isn't one.
			 */
			if (diff > 0 && --keyno < 1)
				keyno = 1;
			pp = xfs_btree_ptr_addr(cur, keyno, block);

#ifdef DEBUG
			error = xfs_btree_check_ptr(cur, pp, 0, level);
			if (error)
				goto error0;
#endif
			cur->bc_ptrs[level] = keyno;
		}
	}

	/* Done with the search. See if we need to adjust the results. */
	if (dir != XFS_LOOKUP_LE && diff < 0) {
		keyno++;
		/*
		 * If ge search and we went off the end of the block, but it's
		 * not the last block, we're in the wrong block.
		 */
		xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
		if (dir == XFS_LOOKUP_GE &&
		    keyno > xfs_btree_get_numrecs(block) &&
		    !xfs_btree_ptr_is_null(cur, &ptr)) {
			int	i;

			/* Step into the first entry of the next leaf. */
			cur->bc_ptrs[0] = keyno;
			error = xfs_btree_increment(cur, 0, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
			XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
			*stat = 1;
			return 0;
		}
	} else if (dir == XFS_LOOKUP_LE && diff > 0)
		keyno--;
	cur->bc_ptrs[0] = keyno;

	/* Return if we succeeded or not. */
	if (keyno == 0 || keyno > xfs_btree_get_numrecs(block))
		*stat = 0;
	else if (dir != XFS_LOOKUP_EQ || diff == 0)
		*stat = 1;
	else
		*stat = 0;
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}

/* Find the high key storage area from a regular key.
 */
STATIC union xfs_btree_key *
xfs_btree_high_key_from_key(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);
	/* High key occupies the second half of the key storage. */
	return (union xfs_btree_key *)((char *)key +
			(cur->bc_ops->key_len / 2));
}

/* Determine the low (and high if overlapped) keys of a leaf block */
STATIC void
xfs_btree_get_leaf_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*key)
{
	union xfs_btree_key	max_hkey;
	union xfs_btree_key	hkey;
	union xfs_btree_rec	*rec;
	union xfs_btree_key	*high;
	int			n;

	/* Low key always comes from the first record. */
	rec = xfs_btree_rec_addr(cur, 1, block);
	cur->bc_ops->init_key_from_rec(key, rec);

	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		/* Scan all records: the high key can come from any of them. */
		cur->bc_ops->init_high_key_from_rec(&max_hkey, rec);
		for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
			rec = xfs_btree_rec_addr(cur, n, block);
			cur->bc_ops->init_high_key_from_rec(&hkey, rec);
			if (cur->bc_ops->diff_two_keys(cur, &hkey, &max_hkey)
					> 0)
				max_hkey = hkey;
		}

		high = xfs_btree_high_key_from_key(cur, key);
		memcpy(high, &max_hkey, cur->bc_ops->key_len / 2);
	}
}

/* Determine the low (and high if overlapped) keys of a node block */
STATIC void
xfs_btree_get_node_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*key)
{
	union xfs_btree_key	*hkey;
	union xfs_btree_key	*max_hkey;
	union xfs_btree_key	*high;
	int			n;

	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		memcpy(key, xfs_btree_key_addr(cur, 1, block),
				cur->bc_ops->key_len / 2);

		/* Scan all high keys: the maximum can be in any slot. */
		max_hkey = xfs_btree_high_key_addr(cur, 1, block);
		for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
			hkey = xfs_btree_high_key_addr(cur, n, block);
			if (cur->bc_ops->diff_two_keys(cur, hkey, max_hkey) > 0)
				max_hkey = hkey;
		}

		high = xfs_btree_high_key_from_key(cur, key);
		memcpy(high, max_hkey, cur->bc_ops->key_len / 2);
	} else {
		memcpy(key, xfs_btree_key_addr(cur, 1, block),
				cur->bc_ops->key_len);
	}
}

/* Derive the keys for any btree block. */
STATIC void
xfs_btree_get_keys(
	struct xfs_btree_cur	*cur,
	struct xfs_btree_block	*block,
	union xfs_btree_key	*key)
{
	if (be16_to_cpu(block->bb_level) == 0)
		xfs_btree_get_leaf_keys(cur, block, key);
	else
		xfs_btree_get_node_keys(cur, block, key);
}

/*
 * Decide if we need to update the parent keys of a btree block.  For
 * a standard btree this is only necessary if we're updating the first
 * record/key.  For an overlapping btree, we must always update the
 * keys because the highest key can be in any of the records or keys
 * in the block.
 */
static inline bool
xfs_btree_needs_key_update(
	struct xfs_btree_cur	*cur,
	int			ptr)
{
	return (cur->bc_flags & XFS_BTREE_OVERLAPPING) || ptr == 1;
}

/*
 * Update the low and high parent keys of the given level, progressing
 * towards the root.  If force_all is false, stop if the keys for a given
 * level do not need updating.
 */
STATIC int
__xfs_btree_updkeys(
	struct xfs_btree_cur	*cur,
	int			level,
	struct xfs_btree_block	*block,
	struct xfs_buf		*bp0,
	bool			force_all)
{
	union xfs_btree_key	key;	/* keys from current level */
	union xfs_btree_key	*lkey;	/* keys from the next level up */
	union xfs_btree_key	*hkey;
	union xfs_btree_key	*nlkey;	/* keys from the next level up */
	union xfs_btree_key	*nhkey;
	struct xfs_buf		*bp;
	int			ptr;

	ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);

	/* Exit if there aren't any parent levels to update.
*/ if (level + 1 >= cur->bc_nlevels) return 0; trace_xfs_btree_updkeys(cur, level, bp0); lkey = &key; hkey = xfs_btree_high_key_from_key(cur, lkey); xfs_btree_get_keys(cur, block, lkey); for (level++; level < cur->bc_nlevels; level++) { #ifdef DEBUG int error; #endif block = xfs_btree_get_block(cur, level, &bp); trace_xfs_btree_updkeys(cur, level, bp); #ifdef DEBUG error = xfs_btree_check_block(cur, block, level, bp); if (error) { XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); return error; } #endif ptr = cur->bc_ptrs[level]; nlkey = xfs_btree_key_addr(cur, ptr, block); nhkey = xfs_btree_high_key_addr(cur, ptr, block); if (!force_all && !(cur->bc_ops->diff_two_keys(cur, nlkey, lkey) != 0 || cur->bc_ops->diff_two_keys(cur, nhkey, hkey) != 0)) break; xfs_btree_copy_keys(cur, nlkey, lkey, 1); xfs_btree_log_keys(cur, bp, ptr, ptr); if (level + 1 >= cur->bc_nlevels) break; xfs_btree_get_node_keys(cur, block, lkey); } return 0; } /* Update all the keys from some level in cursor back to the root. */ STATIC int xfs_btree_updkeys_force( struct xfs_btree_cur *cur, int level) { struct xfs_buf *bp; struct xfs_btree_block *block; block = xfs_btree_get_block(cur, level, &bp); return __xfs_btree_updkeys(cur, level, block, bp, true); } /* * Update the parent keys of the given level, progressing towards the root. */ STATIC int xfs_btree_update_keys( struct xfs_btree_cur *cur, int level) { struct xfs_btree_block *block; struct xfs_buf *bp; union xfs_btree_key *kp; union xfs_btree_key key; int ptr; ASSERT(level >= 0); block = xfs_btree_get_block(cur, level, &bp); if (cur->bc_flags & XFS_BTREE_OVERLAPPING) return __xfs_btree_updkeys(cur, level, block, bp, false); XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); XFS_BTREE_TRACE_ARGIK(cur, level, keyp); /* * Go up the tree from this level toward the root. * At each level, update the key value to the value input. * Stop when we reach a level where the cursor isn't pointing * at the first entry in the block. 
*/ xfs_btree_get_keys(cur, block, &key); for (level++, ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) { #ifdef DEBUG int error; #endif block = xfs_btree_get_block(cur, level, &bp); #ifdef DEBUG error = xfs_btree_check_block(cur, block, level, bp); if (error) { XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); return error; } #endif ptr = cur->bc_ptrs[level]; kp = xfs_btree_key_addr(cur, ptr, block); xfs_btree_copy_keys(cur, kp, &key, 1); xfs_btree_log_keys(cur, bp, ptr, ptr); } XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); return 0; } /* * Update the record referred to by cur to the value in the * given record. This either works (return 0) or gets an * EFSCORRUPTED error. */ int xfs_btree_update( struct xfs_btree_cur *cur, union xfs_btree_rec *rec) { struct xfs_btree_block *block; struct xfs_buf *bp; int error; int ptr; union xfs_btree_rec *rp; XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); XFS_BTREE_TRACE_ARGR(cur, rec); /* Pick up the current block. */ block = xfs_btree_get_block(cur, 0, &bp); #ifdef DEBUG error = xfs_btree_check_block(cur, block, 0, bp); if (error) goto error0; #endif /* Get the address of the rec to be updated. */ ptr = cur->bc_ptrs[0]; rp = xfs_btree_rec_addr(cur, ptr, block); /* Fill in the new contents and log them. */ xfs_btree_copy_recs(cur, rp, rec, 1); xfs_btree_log_recs(cur, bp, ptr, ptr); /* * If we are tracking the last record in the tree and * we are at the far right edge of the tree, update it. */ if (xfs_btree_is_lastrec(cur, block, 0)) { cur->bc_ops->update_lastrec(cur, block, rec, ptr, LASTREC_UPDATE); } /* Pass new key value up to our parent. */ if (xfs_btree_needs_key_update(cur, ptr)) { error = xfs_btree_update_keys(cur, 0); if (error) goto error0; } XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); return 0; error0: XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); return error; } /* * Move 1 record left from cur/level if possible. * Update cur to reflect the new path. 
 */
STATIC int					/* error */
xfs_btree_lshift(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	int			lrecs;		/* left record count */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	struct xfs_btree_cur	*tcur;		/* temporary btree cursor */
	int			rrecs;		/* right record count */
	union xfs_btree_ptr	lptr;		/* left btree pointer */
	union xfs_btree_key	*rkp = NULL;	/* right btree key */
	union xfs_btree_ptr	*rpp = NULL;	/* right address pointer */
	union xfs_btree_rec	*rrp = NULL;	/* right record pointer */
	int			error;		/* error return value */
	int			i;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	/* An inode-rooted btree has no sibling at the root level. */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == cur->bc_nlevels - 1)
		goto out0;

	/* Set up variables for this block as "right". */
	right = xfs_btree_get_block(cur, level, &rbp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, right, level, rbp);
	if (error)
		goto error0;
#endif

	/* If we've got no left sibling then we can't shift an entry left. */
	xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
	if (xfs_btree_ptr_is_null(cur, &lptr))
		goto out0;

	/*
	 * If the cursor entry is the one that would be moved, don't
	 * do it... it's too complicated.
	 */
	if (cur->bc_ptrs[level] <= 1)
		goto out0;

	/* Set up the left neighbor as "left". */
	error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
	if (error)
		goto error0;

	/* If it's full, it can't take another entry. */
	lrecs = xfs_btree_get_numrecs(left);
	if (lrecs == cur->bc_ops->get_maxrecs(cur, level))
		goto out0;

	rrecs = xfs_btree_get_numrecs(right);

	/*
	 * We add one entry to the left side and remove one for the right side.
	 * Account for it here, the changes will be updated on disk and logged
	 * later.
	 */
	lrecs++;
	rrecs--;

	XFS_BTREE_STATS_INC(cur, lshift);
	XFS_BTREE_STATS_ADD(cur, moves, 1);

	/*
	 * If non-leaf, copy a key and a ptr to the left block.
	 * Log the changes to the left block.
	 */
	if (level > 0) {
		/* It's a non-leaf.  Move keys and pointers. */
		union xfs_btree_key	*lkp;	/* left btree key */
		union xfs_btree_ptr	*lpp;	/* left address pointer */

		lkp = xfs_btree_key_addr(cur, lrecs, left);
		rkp = xfs_btree_key_addr(cur, 1, right);

		lpp = xfs_btree_ptr_addr(cur, lrecs, left);
		rpp = xfs_btree_ptr_addr(cur, 1, right);
#ifdef DEBUG
		error = xfs_btree_check_ptr(cur, rpp, 0, level);
		if (error)
			goto error0;
#endif
		xfs_btree_copy_keys(cur, lkp, rkp, 1);
		xfs_btree_copy_ptrs(cur, lpp, rpp, 1);

		xfs_btree_log_keys(cur, lbp, lrecs, lrecs);
		xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs);

		ASSERT(cur->bc_ops->keys_inorder(cur,
			xfs_btree_key_addr(cur, lrecs - 1, left), lkp));
	} else {
		/* It's a leaf.  Move records. */
		union xfs_btree_rec	*lrp;	/* left record pointer */

		lrp = xfs_btree_rec_addr(cur, lrecs, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		xfs_btree_copy_recs(cur, lrp, rrp, 1);
		xfs_btree_log_recs(cur, lbp, lrecs, lrecs);

		ASSERT(cur->bc_ops->recs_inorder(cur,
			xfs_btree_rec_addr(cur, lrecs - 1, left), lrp));
	}

	xfs_btree_set_numrecs(left, lrecs);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);

	xfs_btree_set_numrecs(right, rrecs);
	xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);

	/*
	 * Slide the contents of right down one entry.
	 */
	XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1);
	if (level > 0) {
		/* It's a nonleaf. operate on keys and ptrs */
#ifdef DEBUG
		int			i;		/* loop index */

		for (i = 0; i < rrecs; i++) {
			error = xfs_btree_check_ptr(cur, rpp, i + 1, level);
			if (error)
				goto error0;
		}
#endif
		xfs_btree_shift_keys(cur,
				xfs_btree_key_addr(cur, 2, right),
				-1, rrecs);
		xfs_btree_shift_ptrs(cur,
				xfs_btree_ptr_addr(cur, 2, right),
				-1, rrecs);

		xfs_btree_log_keys(cur, rbp, 1, rrecs);
		xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
	} else {
		/* It's a leaf. operate on records */
		xfs_btree_shift_recs(cur,
			xfs_btree_rec_addr(cur, 2, right),
			-1, rrecs);
		xfs_btree_log_recs(cur, rbp, 1, rrecs);
	}

	/*
	 * Using a temporary cursor, update the parent key values of the
	 * block on the left.  Only overlapping btrees need their parent
	 * high keys refreshed here; the temporary cursor is positioned on
	 * the left sibling via firstrec + decrement.
	 */
	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		error = xfs_btree_dup_cursor(cur, &tcur);
		if (error)
			goto error0;
		i = xfs_btree_firstrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);

		error = xfs_btree_decrement(tcur, level, &i);
		if (error)
			goto error1;

		/* Update the parent high keys of the left block, if needed. */
		error = xfs_btree_update_keys(tcur, level);
		if (error)
			goto error1;

		xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
	}

	/* Update the parent keys of the right block. */
	error = xfs_btree_update_keys(cur, level);
	if (error)
		goto error0;

	/* Slide the cursor value left one. */
	cur->bc_ptrs[level]--;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;

error1:
	/* error1 is only reachable after tcur has been duplicated. */
	XFS_BTREE_TRACE_CURSOR(tcur, XBT_ERROR);
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Move 1 record right from cur/level if possible.
 * Update cur to reflect the new path.
 */
STATIC int					/* error */
xfs_btree_rshift(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)		/* success/failure */
{
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	struct xfs_btree_cur	*tcur;		/* temporary btree cursor */
	union xfs_btree_ptr	rptr;		/* right block pointer */
	union xfs_btree_key	*rkp;		/* right btree key */
	int			rrecs;		/* right record count */
	int			lrecs;		/* left record count */
	int			error;		/* error return value */
	int			i;		/* loop counter */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	/* An inode-rooted btree has no sibling at the root level. */
	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level == cur->bc_nlevels - 1))
		goto out0;

	/* Set up variables for this block as "left". */
	left = xfs_btree_get_block(cur, level, &lbp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, left, level, lbp);
	if (error)
		goto error0;
#endif

	/* If we've got no right sibling then we can't shift an entry right. */
	xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
	if (xfs_btree_ptr_is_null(cur, &rptr))
		goto out0;

	/*
	 * If the cursor entry is the one that would be moved, don't
	 * do it... it's too complicated.
	 */
	lrecs = xfs_btree_get_numrecs(left);
	if (cur->bc_ptrs[level] >= lrecs)
		goto out0;

	/* Set up the right neighbor as "right". */
	error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
	if (error)
		goto error0;

	/* If it's full, it can't take another entry. */
	rrecs = xfs_btree_get_numrecs(right);
	if (rrecs == cur->bc_ops->get_maxrecs(cur, level))
		goto out0;

	XFS_BTREE_STATS_INC(cur, rshift);
	XFS_BTREE_STATS_ADD(cur, moves, rrecs);

	/*
	 * Make a hole at the start of the right neighbor block, then
	 * copy the last left block entry to the hole.
	 */
	if (level > 0) {
		/* It's a nonleaf. make a hole in the keys and ptrs */
		union xfs_btree_key	*lkp;
		union xfs_btree_ptr	*lpp;
		union xfs_btree_ptr	*rpp;

		lkp = xfs_btree_key_addr(cur, lrecs, left);
		lpp = xfs_btree_ptr_addr(cur, lrecs, left);
		rkp = xfs_btree_key_addr(cur, 1, right);
		rpp = xfs_btree_ptr_addr(cur, 1, right);

#ifdef DEBUG
		for (i = rrecs - 1; i >= 0; i--) {
			error = xfs_btree_check_ptr(cur, rpp, i, level);
			if (error)
				goto error0;
		}
#endif

		xfs_btree_shift_keys(cur, rkp, 1, rrecs);
		xfs_btree_shift_ptrs(cur, rpp, 1, rrecs);

#ifdef DEBUG
		error = xfs_btree_check_ptr(cur, lpp, 0, level);
		if (error)
			goto error0;
#endif

		/* Now put the new data in, and log it. */
		xfs_btree_copy_keys(cur, rkp, lkp, 1);
		xfs_btree_copy_ptrs(cur, rpp, lpp, 1);

		xfs_btree_log_keys(cur, rbp, 1, rrecs + 1);
		xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1);

		ASSERT(cur->bc_ops->keys_inorder(cur, rkp,
			xfs_btree_key_addr(cur, 2, right)));
	} else {
		/* It's a leaf. make a hole in the records */
		union xfs_btree_rec	*lrp;
		union xfs_btree_rec	*rrp;

		lrp = xfs_btree_rec_addr(cur, lrecs, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		xfs_btree_shift_recs(cur, rrp, 1, rrecs);

		/* Now put the new data in, and log it. */
		xfs_btree_copy_recs(cur, rrp, lrp, 1);
		xfs_btree_log_recs(cur, rbp, 1, rrecs + 1);
	}

	/*
	 * Decrement and log left's numrecs, bump and log right's numrecs.
	 */
	xfs_btree_set_numrecs(left, --lrecs);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS);

	xfs_btree_set_numrecs(right, ++rrecs);
	xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS);

	/*
	 * Using a temporary cursor, update the parent key values of the
	 * block on the right.  Position tcur on the right sibling via
	 * lastrec + increment.
	 */
	error = xfs_btree_dup_cursor(cur, &tcur);
	if (error)
		goto error0;
	i = xfs_btree_lastrec(tcur, level);
	XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);

	error = xfs_btree_increment(tcur, level, &i);
	if (error)
		goto error1;

	/* Update the parent high keys of the left block, if needed. */
	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error1;
	}

	/* Update the parent keys of the right block. */
	error = xfs_btree_update_keys(tcur, level);
	if (error)
		goto error1;

	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;

error1:
	/* error1 is only reachable after tcur has been duplicated. */
	XFS_BTREE_TRACE_CURSOR(tcur, XBT_ERROR);
	xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Split cur/level block in half.
 * Return new block number and the key to its first
 * record (to be inserted into parent).
 */
STATIC int					/* error */
__xfs_btree_split(
	struct xfs_btree_cur	*cur,
	int			level,
	union xfs_btree_ptr	*ptrp,
	union xfs_btree_key	*key,
	struct xfs_btree_cur	**curp,
	int			*stat)		/* success/failure */
{
	union xfs_btree_ptr	lptr;		/* left sibling block ptr */
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	union xfs_btree_ptr	rptr;		/* right sibling block ptr */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	union xfs_btree_ptr	rrptr;		/* right-right sibling ptr */
	struct xfs_buf		*rrbp;		/* right-right buffer pointer */
	struct xfs_btree_block	*rrblock;	/* right-right btree block */
	int			lrecs;
	int			rrecs;
	int			src_index;
	int			error;		/* error return value */
#ifdef DEBUG
	int			i;
#endif

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGIPK(cur, level, *ptrp, key);

	XFS_BTREE_STATS_INC(cur, split);

	/* Set up left block (current one). */
	left = xfs_btree_get_block(cur, level, &lbp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, left, level, lbp);
	if (error)
		goto error0;
#endif

	xfs_btree_buf_to_ptr(cur, lbp, &lptr);

	/* Allocate the new block. If we can't do it, we're toast. Give up.
 */
	error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, stat);
	if (error)
		goto error0;
	if (*stat == 0)
		goto out0;
	XFS_BTREE_STATS_INC(cur, alloc);

	/* Set up the new block as "right". */
	error = xfs_btree_get_buf_block(cur, &rptr, 0, &right, &rbp);
	if (error)
		goto error0;

	/* Fill in the btree header for the new right block. */
	xfs_btree_init_block_cur(cur, rbp, xfs_btree_get_level(left), 0);

	/*
	 * Split the entries between the old and the new block evenly.
	 * Make sure that if there's an odd number of entries now, that
	 * each new block will have the same number of entries.
	 */
	lrecs = xfs_btree_get_numrecs(left);
	rrecs = lrecs / 2;
	if ((lrecs & 1) && cur->bc_ptrs[level] <= rrecs + 1)
		rrecs++;
	src_index = (lrecs - rrecs + 1);

	XFS_BTREE_STATS_ADD(cur, moves, rrecs);

	/* Adjust numrecs for the later get_*_keys() calls. */
	lrecs -= rrecs;
	xfs_btree_set_numrecs(left, lrecs);
	xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs);

	/*
	 * Copy btree block entries from the left block over to the
	 * new block, the right.  Update the right block and log the
	 * changes.
	 */
	if (level > 0) {
		/* It's a non-leaf.  Move keys and pointers. */
		union xfs_btree_key	*lkp;	/* left btree key */
		union xfs_btree_ptr	*lpp;	/* left address pointer */
		union xfs_btree_key	*rkp;	/* right btree key */
		union xfs_btree_ptr	*rpp;	/* right address pointer */

		lkp = xfs_btree_key_addr(cur, src_index, left);
		lpp = xfs_btree_ptr_addr(cur, src_index, left);
		rkp = xfs_btree_key_addr(cur, 1, right);
		rpp = xfs_btree_ptr_addr(cur, 1, right);

#ifdef DEBUG
		/*
		 * NOTE(review): this bound looks like it should cover the
		 * rrecs entries being moved, i.e. indices src_index ..
		 * src_index + rrecs - 1; as written it stops at rrecs.
		 * Confirm intended range before changing.
		 */
		for (i = src_index; i < rrecs; i++) {
			error = xfs_btree_check_ptr(cur, lpp, i, level);
			if (error)
				goto error0;
		}
#endif

		/* Copy the keys & pointers to the new block. */
		xfs_btree_copy_keys(cur, rkp, lkp, rrecs);
		xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs);

		xfs_btree_log_keys(cur, rbp, 1, rrecs);
		xfs_btree_log_ptrs(cur, rbp, 1, rrecs);

		/* Stash the keys of the new block for later insertion. */
		xfs_btree_get_node_keys(cur, right, key);
	} else {
		/* It's a leaf.  Move records. */
		union xfs_btree_rec	*lrp;	/* left record pointer */
		union xfs_btree_rec	*rrp;	/* right record pointer */

		lrp = xfs_btree_rec_addr(cur, src_index, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		/* Copy records to the new block. */
		xfs_btree_copy_recs(cur, rrp, lrp, rrecs);
		xfs_btree_log_recs(cur, rbp, 1, rrecs);

		/* Stash the keys of the new block for later insertion. */
		xfs_btree_get_leaf_keys(cur, right, key);
	}

	/*
	 * Find the left block number by looking in the buffer.
	 * Adjust sibling pointers.
	 */
	xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB);
	xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB);
	xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
	xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);

	xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);

	/*
	 * If there's a block to the new block's right, make that block
	 * point back to right instead of to left.
	 */
	if (!xfs_btree_ptr_is_null(cur, &rrptr)) {
		error = xfs_btree_read_buf_block(cur, &rrptr, 0,
							&rrblock, &rrbp);
		if (error)
			goto error0;
		xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB);
		xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
	}

	/* Update the parent high keys of the left block, if needed. */
	if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error0;
	}

	/*
	 * If the cursor is really in the right block, move it there.
	 * If it's just pointing past the last entry in left, then we'll
	 * insert there, so don't change anything in that case.
	 */
	if (cur->bc_ptrs[level] > lrecs + 1) {
		xfs_btree_setbuf(cur, level, rbp);
		cur->bc_ptrs[level] -= lrecs;
	}
	/*
	 * If there are more levels, we'll need another cursor which refers
	 * the right block, no matter where this cursor was.
	 */
	if (level + 1 < cur->bc_nlevels) {
		error = xfs_btree_dup_cursor(cur, curp);
		if (error)
			goto error0;
		(*curp)->bc_ptrs[level + 1]++;
	}
	*ptrp = rptr;
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}

/* Argument marshalling for running a split on a worker thread. */
struct xfs_btree_split_args {
	struct xfs_btree_cur	*cur;
	int			level;
	union xfs_btree_ptr	*ptrp;
	union xfs_btree_key	*key;
	struct xfs_btree_cur	**curp;
	int			*stat;		/* success/failure */
	int			result;
	bool			kswapd;	/* allocation in kswapd context */
	struct completion	*done;
	struct work_struct	work;
};

/*
 * Stack switching interfaces for allocation
 */
static void
xfs_btree_split_worker(
	struct work_struct	*work)
{
	struct xfs_btree_split_args	*args = container_of(work,
						struct xfs_btree_split_args, work);
	unsigned long		pflags;
	unsigned long		new_pflags = PF_FSTRANS;

	/*
	 * we are in a transaction context here, but may also be doing work
	 * in kswapd context, and hence we may need to inherit that state
	 * temporarily to ensure that we don't block waiting for memory reclaim
	 * in any way.
	 */
	if (args->kswapd)
		new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;

	current_set_flags_nested(&pflags, new_pflags);

	args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
					 args->key, args->curp, args->stat);
	complete(args->done);

	current_restore_flags_nested(&pflags, new_pflags);
}

/*
 * BMBT split requests often come in with little stack to work on. Push
 * them off to a worker thread so there is lots of stack to use. For the other
 * btree types, just call directly to avoid the context switch overhead here.
 */
STATIC int					/* error */
xfs_btree_split(
	struct xfs_btree_cur	*cur,
	int			level,
	union xfs_btree_ptr	*ptrp,
	union xfs_btree_key	*key,
	struct xfs_btree_cur	**curp,
	int			*stat)		/* success/failure */
{
	struct xfs_btree_split_args	args;
	DECLARE_COMPLETION_ONSTACK(done);

	/* Only BMBT splits are deferred to the workqueue for stack depth. */
	if (cur->bc_btnum != XFS_BTNUM_BMAP)
		return __xfs_btree_split(cur, level, ptrp, key, curp, stat);

	/*
	 * args lives on our stack; wait_for_completion() below keeps it
	 * alive until the worker is done with it.
	 */
	args.cur = cur;
	args.level = level;
	args.ptrp = ptrp;
	args.key = key;
	args.curp = curp;
	args.stat = stat;
	args.done = &done;
	args.kswapd = current_is_kswapd();
	INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
	queue_work(xfs_alloc_wq, &args.work);
	wait_for_completion(&done);
	destroy_work_on_stack(&args.work);
	return args.result;
}

/*
 * Copy the old inode root contents into a real block and make the
 * broot point to it.
 */
int						/* error */
xfs_btree_new_iroot(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			*logflags,	/* logging flags for inode */
	int			*stat)		/* return status - 0 fail */
{
	struct xfs_buf		*cbp;		/* buffer for cblock */
	struct xfs_btree_block	*block;		/* btree block */
	struct xfs_btree_block	*cblock;	/* child btree block */
	union xfs_btree_key	*ckp;		/* child key pointer */
	union xfs_btree_ptr	*cpp;		/* child ptr pointer */
	union xfs_btree_key	*kp;		/* pointer to btree key */
	union xfs_btree_ptr	*pp;		/* pointer to block addr */
	union xfs_btree_ptr	nptr;		/* new block addr */
	int			level;		/* btree level */
	int			error;		/* error return code */
#ifdef DEBUG
	int			i;		/* loop counter */
#endif

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_STATS_INC(cur, newroot);

	ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);

	level = cur->bc_nlevels - 1;

	block = xfs_btree_get_iroot(cur);
	pp = xfs_btree_ptr_addr(cur, 1, block);

	/* Allocate the new block. If we can't do it, we're toast. Give up. */
	error = cur->bc_ops->alloc_block(cur, pp, &nptr, stat);
	if (error)
		goto error0;
	if (*stat == 0) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		return 0;
	}
	XFS_BTREE_STATS_INC(cur, alloc);

	/* Copy the root into a real block. */
	error = xfs_btree_get_buf_block(cur, &nptr, 0, &cblock, &cbp);
	if (error)
		goto error0;

	/*
	 * we can't just memcpy() the root in for CRC enabled btree blocks.
	 * In that case have to also ensure the blkno remains correct
	 */
	memcpy(cblock, block, xfs_btree_block_len(cur));
	if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
		if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
			cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn);
		else
			cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn);
	}

	/* The tree grew one level: the iroot now has a single child. */
	be16_add_cpu(&block->bb_level, 1);
	xfs_btree_set_numrecs(block, 1);
	cur->bc_nlevels++;
	cur->bc_ptrs[level + 1] = 1;

	kp = xfs_btree_key_addr(cur, 1, block);
	ckp = xfs_btree_key_addr(cur, 1, cblock);
	xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock));

	cpp = xfs_btree_ptr_addr(cur, 1, cblock);
#ifdef DEBUG
	for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) {
		error = xfs_btree_check_ptr(cur, pp, i, level);
		if (error)
			goto error0;
	}
#endif
	xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock));

#ifdef DEBUG
	error = xfs_btree_check_ptr(cur, &nptr, 0, level);
	if (error)
		goto error0;
#endif
	xfs_btree_copy_ptrs(cur, pp, &nptr, 1);

	xfs_iroot_realloc(cur->bc_private.b.ip,
			  1 - xfs_btree_get_numrecs(cblock),
			  cur->bc_private.b.whichfork);

	xfs_btree_setbuf(cur, level, cbp);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS);
	xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
	xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));

	*logflags |=
		XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork);
	*stat = 1;
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;
error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}

/*
 * Allocate a new root block, fill it in.
 */
STATIC int				/* error */
xfs_btree_new_root(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* one half of the old root block */
	struct xfs_buf		*bp;	/* buffer containing block */
	int			error;	/* error return value */
	struct xfs_buf		*lbp;	/* left buffer pointer */
	struct xfs_btree_block	*left;	/* left btree block */
	struct xfs_buf		*nbp;	/* new (root) buffer */
	struct xfs_btree_block	*new;	/* new (root) btree block */
	int			nptr;	/* new value for key index, 1 or 2 */
	struct xfs_buf		*rbp;	/* right buffer pointer */
	struct xfs_btree_block	*right;	/* right btree block */
	union xfs_btree_ptr	rptr;
	union xfs_btree_ptr	lptr;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_STATS_INC(cur, newroot);

	/* initialise our start point from the cursor */
	cur->bc_ops->init_ptr_from_cur(cur, &rptr);

	/* Allocate the new block. If we can't do it, we're toast. Give up. */
	error = cur->bc_ops->alloc_block(cur, &rptr, &lptr, stat);
	if (error)
		goto error0;
	if (*stat == 0)
		goto out0;
	XFS_BTREE_STATS_INC(cur, alloc);

	/* Set up the new block. */
	error = xfs_btree_get_buf_block(cur, &lptr, 0, &new, &nbp);
	if (error)
		goto error0;

	/* Set the root in the holding structure  increasing the level by 1. */
	cur->bc_ops->set_root(cur, &lptr, 1);

	/*
	 * At the previous root level there are now two blocks: the old root,
	 * and the new block generated when it was split.  We don't know which
	 * one the cursor is pointing at, so we set up variables "left" and
	 * "right" for each case.
	 */
	block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp);
	if (error)
		goto error0;
#endif

	xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
	if (!xfs_btree_ptr_is_null(cur, &rptr)) {
		/* Our block is left, pick up the right block. */
		lbp = bp;
		xfs_btree_buf_to_ptr(cur, lbp, &lptr);
		left = block;
		error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
		if (error)
			goto error0;
		bp = rbp;
		nptr = 1;
	} else {
		/* Our block is right, pick up the left block. */
		rbp = bp;
		xfs_btree_buf_to_ptr(cur, rbp, &rptr);
		right = block;
		xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
		error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
		if (error)
			goto error0;
		bp = lbp;
		nptr = 2;
	}

	/* Fill in the new block's btree header and log it. */
	xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2);
	xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS);
	ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) &&
			!xfs_btree_ptr_is_null(cur, &rptr));

	/* Fill in the key data in the new root. */
	if (xfs_btree_get_level(left) > 0) {
		/*
		 * Get the keys for the left block's keys and put them directly
		 * in the parent block.  Do the same for the right block.
		 */
		xfs_btree_get_node_keys(cur, left,
				xfs_btree_key_addr(cur, 1, new));
		xfs_btree_get_node_keys(cur, right,
				xfs_btree_key_addr(cur, 2, new));
	} else {
		/*
		 * Get the keys for the left block's records and put them
		 * directly in the parent block.  Do the same for the right
		 * block.
		 */
		xfs_btree_get_leaf_keys(cur, left,
			xfs_btree_key_addr(cur, 1, new));
		xfs_btree_get_leaf_keys(cur, right,
			xfs_btree_key_addr(cur, 2, new));
	}
	xfs_btree_log_keys(cur, nbp, 1, 2);

	/* Fill in the pointer data in the new root. */
	xfs_btree_copy_ptrs(cur,
		xfs_btree_ptr_addr(cur, 1, new), &lptr, 1);
	xfs_btree_copy_ptrs(cur,
		xfs_btree_ptr_addr(cur, 2, new), &rptr, 1);
	xfs_btree_log_ptrs(cur, nbp, 1, 2);

	/* Fix up the cursor: point it at whichever half it was on before. */
	xfs_btree_setbuf(cur, cur->bc_nlevels, nbp);
	cur->bc_ptrs[cur->bc_nlevels] = nptr;
	cur->bc_nlevels++;
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 0;
	return 0;
}

/*
 * Make room in the block pointed to by the cursor: first try shifting
 * an entry to a sibling, then fall back to splitting the block.
 */
STATIC int
xfs_btree_make_block_unfull(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* btree level */
	int			numrecs,/* # of recs in block */
	int			*oindex,/* old tree index */
	int			*index,	/* new tree index */
	union xfs_btree_ptr	*nptr,	/* new btree ptr */
	struct xfs_btree_cur	**ncur,	/* new btree cursor */
	union xfs_btree_key	*key,	/* key of new block */
	int			*stat)
{
	int			error = 0;

	if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    level == cur->bc_nlevels - 1) {
		struct xfs_inode *ip = cur->bc_private.b.ip;

		if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
			/* A root block that can be made bigger. */
			xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork);
			*stat = 1;
		} else {
			/* A root block that needs replacing */
			int	logflags = 0;

			error = xfs_btree_new_iroot(cur, &logflags, stat);
			if (error || *stat == 0)
				return error;

			xfs_trans_log_inode(cur->bc_tp, ip, logflags);
		}

		return 0;
	}

	/* First, try shifting an entry to the right neighbor. */
	error = xfs_btree_rshift(cur, level, stat);
	if (error || *stat)
		return error;

	/* Next, try shifting an entry to the left neighbor. */
	error = xfs_btree_lshift(cur, level, stat);
	if (error)
		return error;

	if (*stat) {
		*oindex = *index = cur->bc_ptrs[level];
		return 0;
	}

	/*
	 * Next, try splitting the current block in half.
	 *
	 * If this works we have to re-set our variables because we
	 * could be in a different block now.
	 */
	error = xfs_btree_split(cur, level, nptr, key, ncur, stat);
	if (error || *stat == 0)
		return error;


	*index = cur->bc_ptrs[level];
	return 0;
}

/*
 * Insert one record/level.  Return information to the caller
 * allowing the next level up to proceed if necessary.
 */
STATIC int
xfs_btree_insrec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level to insert record at */
	union xfs_btree_ptr	*ptrp,	/* i/o: block number inserted */
	union xfs_btree_rec	*rec,	/* record to insert */
	union xfs_btree_key	*key,	/* i/o: block key for ptrp */
	struct xfs_btree_cur	**curp,	/* output: new cursor replacing cur */
	int			*stat)	/* success/failure */
{
	struct xfs_btree_block	*block;	/* btree block */
	struct xfs_buf		*bp;	/* buffer for block */
	union xfs_btree_ptr	nptr;	/* new block ptr */
	struct xfs_btree_cur	*ncur;	/* new btree cursor */
	union xfs_btree_key	nkey;	/* new block key */
	union xfs_btree_key	*lkey;
	int			optr;	/* old key/record index */
	int			ptr;	/* key/record index */
	int			numrecs;/* number of records */
	int			error;	/* error return value */
#ifdef DEBUG
	int			i;
#endif
	xfs_daddr_t		old_bn;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGIPR(cur, level, *ptrp, &rec);

	ncur = NULL;
	lkey = &nkey;

	/*
	 * If we have an external root pointer, and we've made it to the
	 * root level, allocate a new root block and we're done.
	 */
	if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
	    (level >= cur->bc_nlevels)) {
		error = xfs_btree_new_root(cur, stat);
		xfs_btree_set_ptr_null(cur, ptrp);

		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		return error;
	}

	/* If we're off the left edge, return failure. */
	ptr = cur->bc_ptrs[level];
	if (ptr == 0) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	optr = ptr;

	XFS_BTREE_STATS_INC(cur, insrec);

	/*
	 * Get pointers to the btree buffer and block.  Remember the
	 * current daddr so we can tell later whether the make-unfull
	 * path moved the cursor into a different block.
	 */
	block = xfs_btree_get_block(cur, level, &bp);
	old_bn = bp ? bp->b_bn : XFS_BUF_DADDR_NULL;
	numrecs = xfs_btree_get_numrecs(block);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;

	/* Check that the new entry is being inserted in the right place. */
	if (ptr <= numrecs) {
		if (level == 0) {
			ASSERT(cur->bc_ops->recs_inorder(cur, rec,
				xfs_btree_rec_addr(cur, ptr, block)));
		} else {
			ASSERT(cur->bc_ops->keys_inorder(cur, key,
				xfs_btree_key_addr(cur, ptr, block)));
		}
	}
#endif

	/*
	 * If the block is full, we can't insert the new entry until we
	 * make the block un-full.
	 */
	xfs_btree_set_ptr_null(cur, &nptr);
	if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) {
		error = xfs_btree_make_block_unfull(cur, level, numrecs,
					&optr, &ptr, &nptr, &ncur, lkey, stat);
		if (error || *stat == 0)
			goto error0;
	}

	/*
	 * The current block may have changed if the block was
	 * previously full and we have just made space in it.
	 */
	block = xfs_btree_get_block(cur, level, &bp);
	numrecs = xfs_btree_get_numrecs(block);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		return error;
#endif

	/*
	 * At this point we know there's room for our new entry in the block
	 * we're pointing at.
	 */
	XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1);

	if (level > 0) {
		/* It's a nonleaf. make a hole in the keys and ptrs */
		union xfs_btree_key	*kp;
		union xfs_btree_ptr	*pp;

		kp = xfs_btree_key_addr(cur, ptr, block);
		pp = xfs_btree_ptr_addr(cur, ptr, block);

#ifdef DEBUG
		for (i = numrecs - ptr; i >= 0; i--) {
			error = xfs_btree_check_ptr(cur, pp, i, level);
			if (error)
				return error;
		}
#endif

		xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
		xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1);

#ifdef DEBUG
		error = xfs_btree_check_ptr(cur, ptrp, 0, level);
		if (error)
			goto error0;
#endif

		/* Now put the new data in, bump numrecs and log it. */
		xfs_btree_copy_keys(cur, kp, key, 1);
		xfs_btree_copy_ptrs(cur, pp, ptrp, 1);
		numrecs++;
		xfs_btree_set_numrecs(block, numrecs);
		xfs_btree_log_ptrs(cur, bp, ptr, numrecs);
		xfs_btree_log_keys(cur, bp, ptr, numrecs);
#ifdef DEBUG
		if (ptr < numrecs) {
			ASSERT(cur->bc_ops->keys_inorder(cur, kp,
				xfs_btree_key_addr(cur, ptr + 1, block)));
		}
#endif
	} else {
		/* It's a leaf. make a hole in the records */
		union xfs_btree_rec	*rp;

		rp = xfs_btree_rec_addr(cur, ptr, block);

		xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);

		/* Now put the new data in, bump numrecs and log it. */
		xfs_btree_copy_recs(cur, rp, rec, 1);
		xfs_btree_set_numrecs(block, ++numrecs);
		xfs_btree_log_recs(cur, bp, ptr, numrecs);
#ifdef DEBUG
		if (ptr < numrecs) {
			ASSERT(cur->bc_ops->recs_inorder(cur, rp,
				xfs_btree_rec_addr(cur, ptr + 1, block)));
		}
#endif
	}

	/* Log the new number of records in the btree header. */
	xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);

	/*
	 * If we just inserted into a new tree block, we have to
	 * recalculate nkey here because nkey is out of date.
	 *
	 * Otherwise we're just updating an existing block (having shoved
	 * some records into the new tree block), so use the regular key
	 * update mechanism.
	 */
	if (bp && bp->b_bn != old_bn) {
		xfs_btree_get_keys(cur, block, lkey);
	} else if (xfs_btree_needs_key_update(cur, optr)) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error0;
	}

	/*
	 * If we are tracking the last record in the tree and
	 * we are at the far right edge of the tree, update it.
	 */
	if (xfs_btree_is_lastrec(cur, block, level)) {
		cur->bc_ops->update_lastrec(cur, block, rec,
					    ptr, LASTREC_INSREC);
	}

	/*
	 * Return the new block number, if any.
	 * If there is one, give back a record value and a cursor too.
	 */
	*ptrp = nptr;
	if (!xfs_btree_ptr_is_null(cur, &nptr)) {
		xfs_btree_copy_keys(cur, key, lkey, 1);
		*curp = ncur;
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}

/*
 * Insert the record at the point referenced by cur.
 *
 * A multi-level split of the tree on insert will invalidate the original
 * cursor.  All callers of this function should assume that the cursor is
 * no longer valid and revalidate it.
 */
int
xfs_btree_insert(
	struct xfs_btree_cur	*cur,
	int			*stat)
{
	int			error;	/* error return value */
	int			i;	/* result value, 0 for failure */
	int			level;	/* current level number in btree */
	union xfs_btree_ptr	nptr;	/* new block number (split result) */
	struct xfs_btree_cur	*ncur;	/* new cursor (split result) */
	struct xfs_btree_cur	*pcur;	/* previous level's cursor */
	union xfs_btree_key	bkey;	/* key of block to insert */
	union xfs_btree_key	*key;
	union xfs_btree_rec	rec;	/* record to insert */

	level = 0;
	ncur = NULL;
	pcur = cur;
	key = &bkey;

	xfs_btree_set_ptr_null(cur, &nptr);

	/* Make a key out of the record data to be inserted, and save it. */
	cur->bc_ops->init_rec_from_cur(cur, &rec);
	cur->bc_ops->init_key_from_rec(key, &rec);

	/*
	 * Loop going up the tree, starting at the leaf level.
	 * Stop when we don't get a split block, that must mean that
	 * the insert is finished with this level.
	 */
	do {
		/*
		 * Insert nrec/nptr into this level of the tree.
		 * Note if we fail, nptr will be null.
		 */
		error = xfs_btree_insrec(pcur, level, &nptr, &rec, key,
				&ncur, &i);
		if (error) {
			/* Never free the caller's cursor on error. */
			if (pcur != cur)
				xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
			goto error0;
		}

		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
		level++;

		/*
		 * See if the cursor we just used is trash.
		 * Can't trash the caller's cursor, but otherwise we should
		 * if ncur is a new cursor or we're about to be done.
		 */
		if (pcur != cur &&
		    (ncur || xfs_btree_ptr_is_null(cur, &nptr))) {
			/* Save the state from the cursor before we trash it */
			if (cur->bc_ops->update_cursor)
				cur->bc_ops->update_cursor(pcur, cur);
			cur->bc_nlevels = pcur->bc_nlevels;
			xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
		}
		/* If we got a new cursor, switch to it. */
		if (ncur) {
			pcur = ncur;
			ncur = NULL;
		}
	} while (!xfs_btree_ptr_is_null(cur, &nptr));

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = i;
	return 0;
error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}

/*
 * Try to merge a non-leaf block back into the inode root.
 *
 * Note: the name "killroot" comes from the fact that we're effectively
 * killing the old root block.  But because we can't just delete the
 * inode we have to copy the single block it was pointing to into the
 * inode.
 */
STATIC int
xfs_btree_kill_iroot(
	struct xfs_btree_cur	*cur)
{
	int			whichfork = cur->bc_private.b.whichfork;
	struct xfs_inode	*ip = cur->bc_private.b.ip;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_block	*block;
	struct xfs_btree_block	*cblock;	/* the root's only child */
	union xfs_btree_key	*kp;
	union xfs_btree_key	*ckp;
	union xfs_btree_ptr	*pp;
	union xfs_btree_ptr	*cpp;
	struct xfs_buf		*cbp;
	int			level;
	int			index;
	int			numrecs;
	int			error;
#ifdef DEBUG
	union xfs_btree_ptr	ptr;
	int			i;
#endif

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
	ASSERT(cur->bc_nlevels > 1);

	/*
	 * Don't deal with the "root block needs to be a leaf" case.
	 * We're just going to turn the thing back into extents anyway.
	 */
	level = cur->bc_nlevels - 1;
	if (level == 1)
		goto out0;

	/*
	 * Give up if the root has multiple children.
	 */
	block = xfs_btree_get_iroot(cur);
	if (xfs_btree_get_numrecs(block) != 1)
		goto out0;

	cblock = xfs_btree_get_block(cur, level - 1, &cbp);
	numrecs = xfs_btree_get_numrecs(cblock);

	/*
	 * Only do this if the next level will fit.
	 * Then the data must be copied up to the inode,
	 * instead of freeing the root you free the next level.
	 */
	if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level))
		goto out0;

	XFS_BTREE_STATS_INC(cur, killroot);

#ifdef DEBUG
	/* The root's only child must have no siblings. */
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB);
	ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
	xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB);
	ASSERT(xfs_btree_ptr_is_null(cur, &ptr));
#endif

	/* Grow the in-inode root to hold the child's records if needed. */
	index = numrecs - cur->bc_ops->get_maxrecs(cur, level);
	if (index) {
		xfs_iroot_realloc(cur->bc_private.b.ip, index,
				  cur->bc_private.b.whichfork);
		block = ifp->if_broot;
	}

	be16_add_cpu(&block->bb_numrecs, index);
	ASSERT(block->bb_numrecs == cblock->bb_numrecs);

	/* Copy the child's keys and pointers into the inode root. */
	kp = xfs_btree_key_addr(cur, 1, block);
	ckp = xfs_btree_key_addr(cur, 1, cblock);
	xfs_btree_copy_keys(cur, kp, ckp, numrecs);

	pp = xfs_btree_ptr_addr(cur, 1, block);
	cpp = xfs_btree_ptr_addr(cur, 1, cblock);
#ifdef DEBUG
	for (i = 0; i < numrecs; i++) {
		error = xfs_btree_check_ptr(cur, cpp, i, level - 1);
		if (error) {
			XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
			return error;
		}
	}
#endif
	xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);

	/* Free the absorbed child block. */
	error = xfs_btree_free_block(cur, cbp);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	cur->bc_bufs[level - 1] = NULL;
	be16_add_cpu(&block->bb_level, -1);
	xfs_trans_log_inode(cur->bc_tp, ip,
		XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork));
	cur->bc_nlevels--;
out0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;
}

/*
 * Kill the current root node, and replace it with its only child node.
 */
STATIC int
xfs_btree_kill_root(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,		/* buffer holding the old root */
	int			level,		/* level of the old root */
	union xfs_btree_ptr	*newroot)	/* the root's only child */
{
	int			error;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_STATS_INC(cur, killroot);

	/*
	 * Update the root pointer, decreasing the level by 1 and then
	 * free the old root.
	 */
	cur->bc_ops->set_root(cur, newroot, -1);

	error = xfs_btree_free_block(cur, bp);
	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}

	cur->bc_bufs[level] = NULL;
	cur->bc_ra[level] = 0;
	cur->bc_nlevels--;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	return 0;
}

/*
 * Readjust the cursor after a record deletion: step the cursor back one
 * entry at the given level (no-op for level 0) and report success
 * (*stat = 1) to the caller.
 */
STATIC int
xfs_btree_dec_cursor(
	struct xfs_btree_cur	*cur,
	int			level,
	int			*stat)
{
	int			error;
	int			i;

	if (level > 0) {
		error = xfs_btree_decrement(cur, level, &i);
		if (error)
			return error;
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
}

/*
 * Single level of the btree record deletion routine.
 * Delete record pointed to by cur/level.
 * Remove the record from its block then rebalance the tree.
 * Return 0 for error, 1 for done, 2 to go on to the next level.
 */
STATIC int					/* error */
xfs_btree_delrec(
	struct xfs_btree_cur	*cur,		/* btree cursor */
	int			level,		/* level removing record from */
	int			*stat)		/* fail/done/go-on */
{
	struct xfs_btree_block	*block;		/* btree block */
	union xfs_btree_ptr	cptr;		/* current block ptr */
	struct xfs_buf		*bp;		/* buffer for block */
	int			error;		/* error return value */
	int			i;		/* loop counter */
	union xfs_btree_ptr	lptr;		/* left sibling block ptr */
	struct xfs_buf		*lbp;		/* left buffer pointer */
	struct xfs_btree_block	*left;		/* left btree block */
	int			lrecs = 0;	/* left record count */
	int			ptr;		/* key/record index */
	union xfs_btree_ptr	rptr;		/* right sibling block ptr */
	struct xfs_buf		*rbp;		/* right buffer pointer */
	struct xfs_btree_block	*right;		/* right btree block */
	struct xfs_btree_block	*rrblock;	/* right-right btree block */
	struct xfs_buf		*rrbp;		/* right-right buffer pointer */
	int			rrecs = 0;	/* right record count */
	struct xfs_btree_cur	*tcur;		/* temporary btree cursor */
	int			numrecs;	/* temporary numrec count */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
	XFS_BTREE_TRACE_ARGI(cur, level);

	tcur = NULL;

	/* Get the index of the entry being deleted, check for nothing there. */
	ptr = cur->bc_ptrs[level];
	if (ptr == 0) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	/* Get the buffer & block containing the record or key/ptr. */
	block = xfs_btree_get_block(cur, level, &bp);
	numrecs = xfs_btree_get_numrecs(block);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto error0;
#endif

	/* Fail if we're off the end of the block. */
	if (ptr > numrecs) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}

	XFS_BTREE_STATS_INC(cur, delrec);
	XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr);

	/* Excise the entries being deleted. */
	if (level > 0) {
		/* It's a nonleaf. operate on keys and ptrs */
		union xfs_btree_key	*lkp;
		union xfs_btree_ptr	*lpp;

		lkp = xfs_btree_key_addr(cur, ptr + 1, block);
		lpp = xfs_btree_ptr_addr(cur, ptr + 1, block);

#ifdef DEBUG
		for (i = 0; i < numrecs - ptr; i++) {
			error = xfs_btree_check_ptr(cur, lpp, i, level);
			if (error)
				goto error0;
		}
#endif

		if (ptr < numrecs) {
			xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr);
			xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr);
			xfs_btree_log_keys(cur, bp, ptr, numrecs - 1);
			xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1);
		}
	} else {
		/* It's a leaf. operate on records */
		if (ptr < numrecs) {
			xfs_btree_shift_recs(cur,
				xfs_btree_rec_addr(cur, ptr + 1, block),
				-1, numrecs - ptr);
			xfs_btree_log_recs(cur, bp, ptr, numrecs - 1);
		}
	}

	/*
	 * Decrement and log the number of entries in the block.
	 */
	xfs_btree_set_numrecs(block, --numrecs);
	xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);

	/*
	 * If we are tracking the last record in the tree and
	 * we are at the far right edge of the tree, update it.
	 */
	if (xfs_btree_is_lastrec(cur, block, level)) {
		cur->bc_ops->update_lastrec(cur, block, NULL,
					    ptr, LASTREC_DELREC);
	}

	/*
	 * We're at the root level.  First, shrink the root block in-memory.
	 * Try to get rid of the next level down.  If we can't then there's
	 * nothing left to do.
	 */
	if (level == cur->bc_nlevels - 1) {
		if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
			xfs_iroot_realloc(cur->bc_private.b.ip, -1,
					  cur->bc_private.b.whichfork);

			error = xfs_btree_kill_iroot(cur);
			if (error)
				goto error0;

			error = xfs_btree_dec_cursor(cur, level, stat);
			if (error)
				goto error0;
			*stat = 1;
			return 0;
		}

		/*
		 * If this is the root level, and there's only one entry left,
		 * and it's NOT the leaf level, then we can get rid of this
		 * level.
		 */
		if (numrecs == 1 && level > 0) {
			union xfs_btree_ptr	*pp;
			/*
			 * pp is still set to the first pointer in the block.
			 * Make it the new root of the btree.
			 */
			pp = xfs_btree_ptr_addr(cur, 1, block);
			error = xfs_btree_kill_root(cur, bp, level, pp);
			if (error)
				goto error0;
		} else if (level > 0) {
			error = xfs_btree_dec_cursor(cur, level, stat);
			if (error)
				goto error0;
		}
		*stat = 1;
		return 0;
	}

	/*
	 * If we deleted the leftmost entry in the block, update the
	 * key values above us in the tree.
	 */
	if (xfs_btree_needs_key_update(cur, ptr)) {
		error = xfs_btree_update_keys(cur, level);
		if (error)
			goto error0;
	}

	/*
	 * If the number of records remaining in the block is at least
	 * the minimum, we're done.
	 */
	if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) {
		error = xfs_btree_dec_cursor(cur, level, stat);
		if (error)
			goto error0;
		return 0;
	}

	/*
	 * Otherwise, we have to move some records around to keep the
	 * tree balanced.  Look at the left and right sibling blocks to
	 * see if we can re-balance by moving only one record.
	 */
	xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
	xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB);

	if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) {
		/*
		 * One child of root, need to get a chance to copy its contents
		 * into the root and delete it. Can't go up to next level,
		 * there's nothing to delete there.
		 */
		if (xfs_btree_ptr_is_null(cur, &rptr) &&
		    xfs_btree_ptr_is_null(cur, &lptr) &&
		    level == cur->bc_nlevels - 2) {
			error = xfs_btree_kill_iroot(cur);
			if (!error)
				error = xfs_btree_dec_cursor(cur, level, stat);
			if (error)
				goto error0;
			return 0;
		}
	}

	ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) ||
	       !xfs_btree_ptr_is_null(cur, &lptr));

	/*
	 * Duplicate the cursor so our btree manipulations here won't
	 * disrupt the next level up.
	 */
	error = xfs_btree_dup_cursor(cur, &tcur);
	if (error)
		goto error0;

	/*
	 * If there's a right sibling, see if it's ok to shift an entry
	 * out of it.
	 */
	if (!xfs_btree_ptr_is_null(cur, &rptr)) {
		/*
		 * Move the temp cursor to the last entry in the next block.
		 * Actually any entry but the first would suffice.
		 */
		i = xfs_btree_lastrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		error = xfs_btree_increment(tcur, level, &i);
		if (error)
			goto error0;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		i = xfs_btree_lastrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		/* Grab a pointer to the block. */
		right = xfs_btree_get_block(tcur, level, &rbp);
#ifdef DEBUG
		error = xfs_btree_check_block(tcur, right, level, rbp);
		if (error)
			goto error0;
#endif
		/* Grab the current block number, for future use. */
		xfs_btree_get_sibling(tcur, right, &cptr, XFS_BB_LEFTSIB);

		/*
		 * If right block is full enough so that removing one entry
		 * won't make it too empty, and left-shifting an entry out
		 * of right to us works, we're done.
		 */
		if (xfs_btree_get_numrecs(right) - 1 >=
		    cur->bc_ops->get_minrecs(tcur, level)) {
			error = xfs_btree_lshift(tcur, level, &i);
			if (error)
				goto error0;
			if (i) {
				ASSERT(xfs_btree_get_numrecs(block) >=
				       cur->bc_ops->get_minrecs(tcur, level));

				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
				tcur = NULL;

				error = xfs_btree_dec_cursor(cur, level, stat);
				if (error)
					goto error0;
				return 0;
			}
		}

		/*
		 * Otherwise, grab the number of records in right for
		 * future reference, and fix up the temp cursor to point
		 * to our block again (last record).
		 */
		rrecs = xfs_btree_get_numrecs(right);
		if (!xfs_btree_ptr_is_null(cur, &lptr)) {
			i = xfs_btree_firstrec(tcur, level);
			XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

			error = xfs_btree_decrement(tcur, level, &i);
			if (error)
				goto error0;
			XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
		}
	}

	/*
	 * If there's a left sibling, see if it's ok to shift an entry
	 * out of it.
	 */
	if (!xfs_btree_ptr_is_null(cur, &lptr)) {
		/*
		 * Move the temp cursor to the first entry in the
		 * previous block.
		 */
		i = xfs_btree_firstrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		error = xfs_btree_decrement(tcur, level, &i);
		if (error)
			goto error0;
		i = xfs_btree_firstrec(tcur, level);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);

		/* Grab a pointer to the block. */
		left = xfs_btree_get_block(tcur, level, &lbp);
#ifdef DEBUG
		error = xfs_btree_check_block(cur, left, level, lbp);
		if (error)
			goto error0;
#endif
		/* Grab the current block number, for future use. */
		xfs_btree_get_sibling(tcur, left, &cptr, XFS_BB_RIGHTSIB);

		/*
		 * If left block is full enough so that removing one entry
		 * won't make it too empty, and right-shifting an entry out
		 * of left to us works, we're done.
		 */
		if (xfs_btree_get_numrecs(left) - 1 >=
		    cur->bc_ops->get_minrecs(tcur, level)) {
			error = xfs_btree_rshift(tcur, level, &i);
			if (error)
				goto error0;
			if (i) {
				ASSERT(xfs_btree_get_numrecs(block) >=
				       cur->bc_ops->get_minrecs(tcur, level));
				xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
				tcur = NULL;
				if (level == 0)
					cur->bc_ptrs[0]++;
				XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
				*stat = 1;
				return 0;
			}
		}

		/*
		 * Otherwise, grab the number of records in left for
		 * future reference.
		 */
		lrecs = xfs_btree_get_numrecs(left);
	}

	/* Delete the temp cursor, we're done with it. */
	xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
	tcur = NULL;

	/* If here, we need to do a join to keep the tree balanced. */
	ASSERT(!xfs_btree_ptr_is_null(cur, &cptr));

	if (!xfs_btree_ptr_is_null(cur, &lptr) &&
	    lrecs + xfs_btree_get_numrecs(block) <=
			cur->bc_ops->get_maxrecs(cur, level)) {
		/*
		 * Set "right" to be the starting block,
		 * "left" to be the left neighbor.
		 */
		rptr = cptr;
		right = block;
		rbp = bp;
		error = xfs_btree_read_buf_block(cur, &lptr, 0, &left, &lbp);
		if (error)
			goto error0;

	/*
	 * If that won't work, see if we can join with the right neighbor block.
	 */
	} else if (!xfs_btree_ptr_is_null(cur, &rptr) &&
		   rrecs + xfs_btree_get_numrecs(block) <=
			cur->bc_ops->get_maxrecs(cur, level)) {
		/*
		 * Set "left" to be the starting block,
		 * "right" to be the right neighbor.
		 */
		lptr = cptr;
		left = block;
		lbp = bp;
		error = xfs_btree_read_buf_block(cur, &rptr, 0, &right, &rbp);
		if (error)
			goto error0;

	/*
	 * Otherwise, we can't fix the imbalance.
	 * Just return.  This is probably a logic error, but it's not fatal.
	 */
	} else {
		error = xfs_btree_dec_cursor(cur, level, stat);
		if (error)
			goto error0;
		return 0;
	}

	rrecs = xfs_btree_get_numrecs(right);
	lrecs = xfs_btree_get_numrecs(left);

	/*
	 * We're now going to join "left" and "right" by moving all the stuff
	 * in "right" to "left" and deleting "right".
	 */
	XFS_BTREE_STATS_ADD(cur, moves, rrecs);
	if (level > 0) {
		/* It's a non-leaf.  Move keys and pointers. */
		union xfs_btree_key	*lkp;	/* left btree key */
		union xfs_btree_ptr	*lpp;	/* left address pointer */
		union xfs_btree_key	*rkp;	/* right btree key */
		union xfs_btree_ptr	*rpp;	/* right address pointer */

		lkp = xfs_btree_key_addr(cur, lrecs + 1, left);
		lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left);
		rkp = xfs_btree_key_addr(cur, 1, right);
		rpp = xfs_btree_ptr_addr(cur, 1, right);
#ifdef DEBUG
		for (i = 1; i < rrecs; i++) {
			error = xfs_btree_check_ptr(cur, rpp, i, level);
			if (error)
				goto error0;
		}
#endif
		xfs_btree_copy_keys(cur, lkp, rkp, rrecs);
		xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs);

		xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs);
		xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs);
	} else {
		/* It's a leaf.  Move records. */
		union xfs_btree_rec	*lrp;	/* left record pointer */
		union xfs_btree_rec	*rrp;	/* right record pointer */

		lrp = xfs_btree_rec_addr(cur, lrecs + 1, left);
		rrp = xfs_btree_rec_addr(cur, 1, right);

		xfs_btree_copy_recs(cur, lrp, rrp, rrecs);
		xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs);
	}

	XFS_BTREE_STATS_INC(cur, join);

	/*
	 * Fix up the number of records and right block pointer in the
	 * surviving block, and log it.
	 */
	xfs_btree_set_numrecs(left, lrecs + rrecs);
	/* NOTE(review): comma operator below — harmless, but likely meant ';'. */
	xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB),
	xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
	xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);

	/* If there is a right sibling, point it to the remaining block. */
	xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB);
	if (!xfs_btree_ptr_is_null(cur, &cptr)) {
		error = xfs_btree_read_buf_block(cur, &cptr, 0, &rrblock, &rrbp);
		if (error)
			goto error0;
		xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB);
		xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
	}

	/* Free the deleted block. */
	error = xfs_btree_free_block(cur, rbp);
	if (error)
		goto error0;

	/*
	 * If we joined with the left neighbor, set the buffer in the
	 * cursor to the left block, and fix up the index.
	 */
	if (bp != lbp) {
		cur->bc_bufs[level] = lbp;
		cur->bc_ptrs[level] += lrecs;
		cur->bc_ra[level] = 0;
	}
	/*
	 * If we joined with the right neighbor and there's a level above
	 * us, increment the cursor at that level.
	 */
	else if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) ||
		 (level + 1 < cur->bc_nlevels)) {
		error = xfs_btree_increment(cur, level + 1, &i);
		if (error)
			goto error0;
	}

	/*
	 * Readjust the ptr at this level if it's not a leaf, since it's
	 * still pointing at the deletion point, which makes the cursor
	 * inconsistent.  If this makes the ptr 0, the caller fixes it up.
	 * We can't use decrement because it would change the next level up.
	 */
	if (level > 0)
		cur->bc_ptrs[level]--;

	/*
	 * We combined blocks, so we have to update the parent keys if the
	 * btree supports overlapped intervals.  However, bc_ptrs[level + 1]
	 * points to the old block so that the caller knows which record to
	 * delete.  Therefore, the caller must be savvy enough to call updkeys
	 * for us if we return stat == 2.  The other exit points from this
	 * function don't require deletions further up the tree, so they can
	 * call updkeys directly.
	 */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);

	/* Return value means the next level up has something to do. */
	*stat = 2;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	if (tcur)
		xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Delete the record pointed to by cur.
 * The cursor refers to the place where the record was (could be inserted)
 * when the operation returns.
 */
int					/* error */
xfs_btree_delete(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	int			error;	/* error return value */
	int			level;
	int			i;
	bool			joined = false;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	/*
	 * Go up the tree, starting at leaf level.
	 *
	 * If 2 is returned then a join was done; go to the next level.
	 * Otherwise we are done.
	 */
	for (level = 0, i = 2; i == 2; level++) {
		error = xfs_btree_delrec(cur, level, &i);
		if (error)
			goto error0;
		if (i == 2)
			joined = true;
	}

	/*
	 * If we combined blocks as part of deleting the record, delrec won't
	 * have updated the parent high keys so we have to do that here.
	 */
	if (joined && (cur->bc_flags & XFS_BTREE_OVERLAPPING)) {
		error = xfs_btree_updkeys_force(cur, 0);
		if (error)
			goto error0;
	}

	/* Deletion failed at the leaf; re-point any levels left at index 0. */
	if (i == 0) {
		for (level = 1; level < cur->bc_nlevels; level++) {
			if (cur->bc_ptrs[level] == 0) {
				error = xfs_btree_decrement(cur, level, &i);
				if (error)
					goto error0;
				break;
			}
		}
	}

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = i;
	return 0;

error0:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}

/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_btree_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	union xfs_btree_rec	**recp,	/* output: btree record */
	int			*stat)	/* output: success/failure */
{
	struct xfs_btree_block	*block;	/* btree block */
	struct xfs_buf		*bp;	/* buffer pointer */
	int			ptr;	/* record number */
#ifdef DEBUG
	int			error;	/* error return value */
#endif

	ptr = cur->bc_ptrs[0];
	block = xfs_btree_get_block(cur, 0, &bp);

#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, 0, bp);
	if (error)
		return error;
#endif

	/*
	 * Off the right end or left end, return failure.
	 */
	if (ptr > xfs_btree_get_numrecs(block) || ptr <= 0) {
		*stat = 0;
		return 0;
	}

	/*
	 * Point to the record and extract its data.
	 */
	*recp = xfs_btree_rec_addr(cur, ptr, block);
	*stat = 1;
	return 0;
}

/* Visit a block in a btree.
 */
STATIC int
xfs_btree_visit_block(
	struct xfs_btree_cur		*cur,
	int				level,
	xfs_btree_visit_blocks_fn	fn,	/* per-block callback */
	void				*data)	/* opaque callback argument */
{
	struct xfs_btree_block		*block;
	struct xfs_buf			*bp;
	union xfs_btree_ptr		rptr;
	int				error;

	/* do right sibling readahead */
	xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
	block = xfs_btree_get_block(cur, level, &bp);

	/* process the block */
	error = fn(cur, level, data);
	if (error)
		return error;

	/* now read rh sibling block for next iteration */
	xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
	if (xfs_btree_ptr_is_null(cur, &rptr))
		return -ENOENT;	/* end of this level; caller moves down */

	return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
}


/* Visit every block in a btree. */
int
xfs_btree_visit_blocks(
	struct xfs_btree_cur		*cur,
	xfs_btree_visit_blocks_fn	fn,
	void				*data)
{
	union xfs_btree_ptr		lptr;
	int				level;
	struct xfs_btree_block		*block = NULL;
	int				error = 0;

	cur->bc_ops->init_ptr_from_cur(cur, &lptr);

	/* for each level */
	for (level = cur->bc_nlevels - 1; level >= 0; level--) {
		/* grab the left hand block */
		error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
		if (error)
			return error;

		/* readahead the left most block for the next level down */
		if (level > 0) {
			union xfs_btree_ptr     *ptr;

			ptr = xfs_btree_ptr_addr(cur, 1, block);
			xfs_btree_readahead_ptr(cur, ptr, 1);

			/* save for the next iteration of the loop */
			lptr = *ptr;
		}

		/* for each buffer in the level */
		do {
			error = xfs_btree_visit_block(cur, level, fn, data);
		} while (!error);

		/* -ENOENT just means "end of level"; anything else is real */
		if (error != -ENOENT)
			return error;
	}

	return 0;
}

/*
 * Change the owner of a btree.
 *
 * The mechanism we use here is ordered buffer logging. Because we don't know
 * how many buffers we are going to need to modify, we don't really want to
 * have to make transaction reservations for the worst case of every buffer in a
 * full size btree as that may be more space than we can fit in the log....
 *
 * We do the btree walk in the most optimal manner possible - we have sibling
 * pointers so we can just walk all the blocks on each level from left to right
 * in a single pass, and then move to the next level and do the same. We can
 * also do readahead on the sibling pointers to get IO moving more quickly,
 * though for slow disks this is unlikely to make much difference to performance
 * as the amount of CPU work we have to do before moving to the next block is
 * relatively small.
 *
 * For each btree block that we load, modify the owner appropriately, set the
 * buffer as an ordered buffer and log it appropriately. We need to ensure that
 * we mark the region we change dirty so that if the buffer is relogged in
 * a subsequent transaction the changes we make here as an ordered buffer are
 * correctly relogged in that transaction. If we are in recovery context, then
 * just queue the modified buffer as delayed write buffer so the transaction
 * recovery completion writes the changes to disk.
 */
struct xfs_btree_block_change_owner_info {
	__uint64_t		new_owner;	/* owner id to stamp into blocks */
	struct list_head	*buffer_list;	/* delwri queue (recovery path) */
};

static int
xfs_btree_block_change_owner(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*data)
{
	struct xfs_btree_block_change_owner_info	*bbcoi = data;
	struct xfs_btree_block	*block;
	struct xfs_buf		*bp;

	/* modify the owner */
	block = xfs_btree_get_block(cur, level, &bp);
	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
		block->bb_u.l.bb_owner = cpu_to_be64(bbcoi->new_owner);
	else
		block->bb_u.s.bb_owner = cpu_to_be32(bbcoi->new_owner);

	/*
	 * If the block is a root block hosted in an inode, we might not have a
	 * buffer pointer here and we shouldn't attempt to log the change as the
	 * information is already held in the inode and discarded when the root
	 * block is formatted into the on-disk inode fork. We still change it,
	 * though, so everything is consistent in memory.
	 */
	if (bp) {
		if (cur->bc_tp) {
			xfs_trans_ordered_buf(cur->bc_tp, bp);
			xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
		} else {
			xfs_buf_delwri_queue(bp, bbcoi->buffer_list);
		}
	} else {
		ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
		ASSERT(level == cur->bc_nlevels - 1);
	}

	return 0;
}

int
xfs_btree_change_owner(
	struct xfs_btree_cur	*cur,
	__uint64_t		new_owner,
	struct list_head	*buffer_list)
{
	struct xfs_btree_block_change_owner_info	bbcoi;

	bbcoi.new_owner = new_owner;
	bbcoi.buffer_list = buffer_list;

	return xfs_btree_visit_blocks(cur, xfs_btree_block_change_owner,
			&bbcoi);
}

/**
 * xfs_btree_sblock_v5hdr_verify() -- verify the v5 fields of a short-format
 *				      btree block
 *
 * @bp: buffer containing the btree block
 */
bool
xfs_btree_sblock_v5hdr_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return false;
	if (!uuid_equal(&block->bb_u.s.bb_uuid, &mp->m_sb.sb_meta_uuid))
		return false;
	if (block->bb_u.s.bb_blkno != cpu_to_be64(bp->b_bn))
		return false;
	if (pag && be32_to_cpu(block->bb_u.s.bb_owner) != pag->pag_agno)
		return false;
	return true;
}

/**
 * xfs_btree_sblock_verify() -- verify a short-format btree block
 *
 * @bp: buffer containing the btree block
 * @max_recs: maximum records allowed in this btree node
 */
bool
xfs_btree_sblock_verify(
	struct xfs_buf		*bp,
	unsigned int		max_recs)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);

	/* numrecs verification */
	if (be16_to_cpu(block->bb_numrecs) > max_recs)
		return false;

	/* sibling pointer verification */
	if (!block->bb_u.s.bb_leftsib ||
	    (be32_to_cpu(block->bb_u.s.bb_leftsib) >= mp->m_sb.sb_agblocks &&
	     block->bb_u.s.bb_leftsib != cpu_to_be32(NULLAGBLOCK)))
		return false;
	if (!block->bb_u.s.bb_rightsib ||
	    (be32_to_cpu(block->bb_u.s.bb_rightsib) >= mp->m_sb.sb_agblocks &&
	     block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK)))
		return false;

	return true;
}

/*
 * Calculate the number of btree levels needed to store a given number of
 * records in a short-format btree.
 */
uint
xfs_btree_compute_maxlevels(
	struct xfs_mount	*mp,
	uint			*limits,	/* [0]=leaf maxrecs, [1]=node maxrecs */
	unsigned long		len)		/* number of records to store */
{
	uint			level;
	unsigned long		maxblocks;

	maxblocks = (len + limits[0] - 1) / limits[0];
	for (level = 1; maxblocks > 1; level++)
		maxblocks = (maxblocks + limits[1] - 1) / limits[1];
	return level;
}

/*
 * Query a regular btree for all records overlapping a given interval.
 * Start with a LE lookup of the key of low_rec and return all records
 * until we find a record with a key greater than the key of high_rec.
 */
STATIC int
xfs_btree_simple_query_range(
	struct xfs_btree_cur		*cur,
	union xfs_btree_key		*low_key,
	union xfs_btree_key		*high_key,
	xfs_btree_query_range_fn	fn,
	void				*priv)
{
	union xfs_btree_rec		*recp;
	union xfs_btree_key		rec_key;
	__int64_t			diff;
	int				stat;
	bool				firstrec = true;
	int				error;

	ASSERT(cur->bc_ops->init_high_key_from_rec);
	ASSERT(cur->bc_ops->diff_two_keys);

	/*
	 * Find the leftmost record.  The btree cursor must be set
	 * to the low record used to generate low_key.
	 */
	stat = 0;
	error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat);
	if (error)
		goto out;

	/* Nothing?  See if there's anything to the right. */
	if (!stat) {
		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			goto out;
	}

	while (stat) {
		/* Find the record. */
		error = xfs_btree_get_rec(cur, &recp, &stat);
		if (error || !stat)
			break;

		/* Skip if high_key(rec) < low_key. */
		if (firstrec) {
			cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
			firstrec = false;
			diff = cur->bc_ops->diff_two_keys(cur, low_key,
					&rec_key);
			if (diff > 0)
				goto advloop;
		}

		/* Stop if high_key < low_key(rec). */
		cur->bc_ops->init_key_from_rec(&rec_key, recp);
		diff = cur->bc_ops->diff_two_keys(cur, &rec_key, high_key);
		if (diff > 0)
			break;

		/* Callback */
		error = fn(cur, recp, priv);
		if (error < 0 || error == XFS_BTREE_QUERY_RANGE_ABORT)
			break;

advloop:
		/* Move on to the next record. */
		error = xfs_btree_increment(cur, 0, &stat);
		if (error)
			break;
	}

out:
	return error;
}

/*
 * Query an overlapped interval btree for all records overlapping a given
 * interval.  This function roughly follows the algorithm given in
 * "Interval Trees" of _Introduction to Algorithms_, which is section
 * 14.3 in the 2nd and 3rd editions.
 *
 * First, generate keys for the low and high records passed in.
 *
 * For any leaf node, generate the high and low keys for the record.
 * If the record keys overlap with the query low/high keys, pass the
 * record to the function iterator.
 *
 * For any internal node, compare the low and high keys of each
 * pointer against the query low/high keys.  If there's an overlap,
 * follow the pointer.
 *
 * As an optimization, we stop scanning a block when we find a low key
 * that is greater than the query's high key.
 */
STATIC int
xfs_btree_overlapped_query_range(
	struct xfs_btree_cur		*cur,
	union xfs_btree_key		*low_key,
	union xfs_btree_key		*high_key,
	xfs_btree_query_range_fn	fn,
	void				*priv)
{
	union xfs_btree_ptr		ptr;
	union xfs_btree_ptr		*pp;
	union xfs_btree_key		rec_key;
	union xfs_btree_key		rec_hkey;
	union xfs_btree_key		*lkp;
	union xfs_btree_key		*hkp;
	union xfs_btree_rec		*recp;
	struct xfs_btree_block		*block;
	__int64_t			ldiff;
	__int64_t			hdiff;
	int				level;
	struct xfs_buf			*bp;
	int				i;
	int				error;

	/* Load the root of the btree. */
	level = cur->bc_nlevels - 1;
	cur->bc_ops->init_ptr_from_cur(cur, &ptr);
	error = xfs_btree_lookup_get_block(cur, level, &ptr, &block);
	if (error)
		return error;
	xfs_btree_get_block(cur, level, &bp);
	trace_xfs_btree_overlapped_query_range(cur, level, bp);
#ifdef DEBUG
	error = xfs_btree_check_block(cur, block, level, bp);
	if (error)
		goto out;
#endif
	cur->bc_ptrs[level] = 1;

	/* Iterative depth-first walk; bc_ptrs[] is the per-level position. */
	while (level < cur->bc_nlevels) {
		block = xfs_btree_get_block(cur, level, &bp);

		/* End of node, pop back towards the root. */
		if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
pop_up:
			if (level < cur->bc_nlevels - 1)
				cur->bc_ptrs[level + 1]++;
			level++;
			continue;
		}

		if (level == 0) {
			/* Handle a leaf node. */
			recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);

			cur->bc_ops->init_high_key_from_rec(&rec_hkey, recp);
			ldiff = cur->bc_ops->diff_two_keys(cur, &rec_hkey,
					low_key);

			cur->bc_ops->init_key_from_rec(&rec_key, recp);
			hdiff = cur->bc_ops->diff_two_keys(cur, high_key,
					&rec_key);

			/*
			 * If (record's high key >= query's low key) and
			 *    (query's high key >= record's low key), then
			 * this record overlaps the query range; callback.
			 */
			if (ldiff >= 0 && hdiff >= 0) {
				error = fn(cur, recp, priv);
				if (error < 0 ||
				    error == XFS_BTREE_QUERY_RANGE_ABORT)
					break;
			} else if (hdiff < 0) {
				/* Record is larger than high key; pop. */
				goto pop_up;
			}
			cur->bc_ptrs[level]++;
			continue;
		}

		/* Handle an internal node. */
		lkp = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);
		hkp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
		pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);

		ldiff = cur->bc_ops->diff_two_keys(cur, hkp, low_key);
		hdiff = cur->bc_ops->diff_two_keys(cur, high_key, lkp);

		/*
		 * If (pointer's high key >= query's low key) and
		 *    (query's high key >= pointer's low key), then
		 * this record overlaps the query range; follow pointer.
		 */
		if (ldiff >= 0 && hdiff >= 0) {
			level--;
			error = xfs_btree_lookup_get_block(cur, level, pp,
					&block);
			if (error)
				goto out;
			xfs_btree_get_block(cur, level, &bp);
			trace_xfs_btree_overlapped_query_range(cur, level, bp);
#ifdef DEBUG
			error = xfs_btree_check_block(cur, block, level, bp);
			if (error)
				goto out;
#endif
			cur->bc_ptrs[level] = 1;
			continue;
		} else if (hdiff < 0) {
			/* The low key is larger than the upper range; pop. */
			goto pop_up;
		}
		cur->bc_ptrs[level]++;
	}

out:
	/*
	 * If we don't end this function with the cursor pointing at a record
	 * block, a subsequent non-error cursor deletion will not release
	 * node-level buffers, causing a buffer leak.  This is quite possible
	 * with a zero-results range query, so release the buffers if we
	 * failed to return any results.
	 */
	if (cur->bc_bufs[0] == NULL) {
		for (i = 0; i < cur->bc_nlevels; i++) {
			if (cur->bc_bufs[i]) {
				xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
				cur->bc_bufs[i] = NULL;
				cur->bc_ptrs[i] = 0;
				cur->bc_ra[i] = 0;
			}
		}
	}

	return error;
}

/*
 * Query a btree for all records overlapping a given interval of keys.  The
 * supplied function will be called with each record found; return one of the
 * XFS_BTREE_QUERY_RANGE_{CONTINUE,ABORT} values or the usual negative error
 * code.  This function returns XFS_BTREE_QUERY_RANGE_ABORT, zero, or a
 * negative error code.
 */
int
xfs_btree_query_range(
	struct xfs_btree_cur		*cur,
	union xfs_btree_irec		*low_rec,
	union xfs_btree_irec		*high_rec,
	xfs_btree_query_range_fn	fn,
	void				*priv)
{
	union xfs_btree_rec		rec;
	union xfs_btree_key		low_key;
	union xfs_btree_key		high_key;

	/* Find the keys of both ends of the interval. */
	cur->bc_rec = *high_rec;
	cur->bc_ops->init_rec_from_cur(cur, &rec);
	cur->bc_ops->init_key_from_rec(&high_key, &rec);

	cur->bc_rec = *low_rec;
	cur->bc_ops->init_rec_from_cur(cur, &rec);
	cur->bc_ops->init_key_from_rec(&low_key, &rec);

	/* Enforce low key < high key. */
	if (cur->bc_ops->diff_two_keys(cur, &low_key, &high_key) > 0)
		return -EINVAL;

	if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
		return xfs_btree_simple_query_range(cur, &low_key,
				&high_key, fn, priv);
	return xfs_btree_overlapped_query_range(cur, &low_key, &high_key,
			fn, priv);
}

/*
 * Calculate the number of blocks needed to store a given number of records
 * in a short-format (per-AG metadata) btree.
 */
xfs_extlen_t
xfs_btree_calc_size(
	struct xfs_mount	*mp,
	uint			*limits,	/* [0]=leaf maxrecs, [1]=node maxrecs */
	unsigned long long	len)		/* number of records to store */
{
	int			level;
	int			maxrecs;
	xfs_extlen_t		rval;

	maxrecs = limits[0];
	for (level = 0, rval = 0; len > 1; level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		maxrecs = limits[1];
		rval += len;
	}
	return rval;
}

/* Count one visited block for xfs_btree_count_blocks(). */
int
xfs_btree_count_blocks_helper(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*data)
{
	xfs_extlen_t		*blocks = data;
	(*blocks)++;

	return 0;
}

/* Count the blocks in a btree and return the result in *blocks. */
int
xfs_btree_count_blocks(
	struct xfs_btree_cur	*cur,
	xfs_extlen_t		*blocks)
{
	*blocks = 0;
	return xfs_btree_visit_blocks(cur, xfs_btree_count_blocks_helper,
			blocks);
}
dperezde/little-penguin
linux/fs/xfs/libxfs/xfs_btree.c
C
gpl-2.0
129,934
#!/usr/bin/env bash

# Build an xtrabackup binary against the InnoDB flavor selected on the
# command line.  The toolchain and flags are taken from the environment,
# or recovered from an existing ../CMakeCache.txt when one is readable.

set -e

# Server versions this script knows how to build against.
MYSQL_51_VERSION=5.1.59
MYSQL_55_VERSION=5.5.17
MYSQL_56_VERSION=5.6.10

PS_51_VERSION=5.1.59-13.0
PS_55_VERSION=5.5.16-22.0

if [ -r ../CMakeCache.txt ]; then
    # Reuse the toolchain recorded in the parent build's CMake cache.
    # "${VAR:=value}" assigns only when VAR is unset or empty, so explicit
    # environment overrides still win.
    echo BUILD_TYPE= ${BUILD_TYPE:="$(grep CMAKE_BUILD_TYPE:STRING= ../CMakeCache.txt | cut -f2- -d=)"}
    echo SRC_DIR= ${SRC_DIR:="$(grep MySQL_SOURCE_DIR:STATIC= ../CMakeCache.txt | cut -f2- -d=)"}
    echo CMAKE= ${CMAKE:="$(grep CMAKE_COMMAND:INTERNAL= ../CMakeCache.txt | cut -f2- -d=)"}
    # Bug fix: these three previously all expanded "${LD:=...}", so the AR
    # cache value was assigned to LD, and AR/RANLIB were never set at all.
    echo AR= ${AR:="$(grep CMAKE_AR:FILEPATH= ../CMakeCache.txt | cut -f2- -d=)"}
    echo LD= ${LD:="$(grep CMAKE_LINKER:FILEPATH= ../CMakeCache.txt | cut -f2- -d=)"}
    echo RANLIB= ${RANLIB:="$(grep CMAKE_RANLIB:FILEPATH= ../CMakeCache.txt | cut -f2- -d=)"}
    echo CC= ${CC:="$(grep CMAKE_C_COMPILER:FILEPATH= ../CMakeCache.txt | cut -f2- -d=) $(grep CMAKE_C_COMPILER_ARG1:STRING= ../CMakeCache.txt | cut -f2- -d=)"}
    echo CXX= ${CXX:="$(grep CMAKE_CXX_COMPILER:FILEPATH= ../CMakeCache.txt | cut -f2- -d=) $(grep CMAKE_CXX_COMPILER_ARG1:STRING= ../CMakeCache.txt | cut -f2- -d=)"}
    echo CFLAGS= ${CFLAGS:="$(grep CMAKE_C_FLAGS:STRING= ../CMakeCache.txt | cut -f2- -d=)"}
    echo CXXFLAGS= ${CXXFLAGS:="$(grep CMAKE_CXX_FLAGS:STRING= ../CMakeCache.txt | cut -f2- -d=)"}
    echo LDFLAGS= ${LDFLAGS:="$(grep CMAKE_EXE_LINKER_FLAGS:STRING= ../CMakeCache.txt | cut -f2- -d=)"}
    echo MYSQLD_LDFLAGS= ${MYSQLD_LDFLAGS:="$(grep MYSQLD_LDFLAGS:STRING= ../CMakeCache.txt | cut -f2- -d=)"}
else
    # No parent cache: fall back to defaults / whatever the caller exported.
    echo BUILD_TYPE= ${BUILD_TYPE:=RelWithDebInfo}
    echo SRC_DIR= ${SRC_DIR:=$(readlink -f ..)}
    echo CMAKE= ${CMAKE:=cmake}
    # Bug fix: the LD and RANLIB lines used to echo "${AR:-}" by mistake.
    echo AR= ${AR:-}
    echo LD= ${LD:-}
    echo RANLIB= ${RANLIB:-}
    echo CC= ${CC:=gcc}
    echo CXX= ${CXX:=g++}
    echo CFLAGS= ${CFLAGS:-}
    echo CXXFLAGS= ${CXXFLAGS:-}
    echo LDFLAGS= ${LDFLAGS:-}
    echo MYSQLD_LDFLAGS= ${MYSQLD_LDFLAGS:-}
fi

# Make the toolchain visible to the sub-builds.  -DXTRABACKUP selects the
# xtrabackup-specific code paths in the server sources.
export AR="$AR"
export LD="$LD"
export RANLIB="$RANLIB"
export CC="$CC"
export CXX="$CXX"
export CFLAGS="$CFLAGS -DXTRABACKUP"
export CXXFLAGS="$CXXFLAGS -DXTRABACKUP"
export LDFLAGS="$LDFLAGS"
export MYSQLD_LDFLAGS="$MYSQLD_LDFLAGS"
export SRC_DIR="$SRC_DIR"
export BUILD_TYPE="$BUILD_TYPE"

# Prefer GNU make when it is installed as "gmake" (e.g. on the BSDs).
MAKE_CMD=make
if gmake --version > /dev/null 2>&1
then
    MAKE_CMD=gmake
fi
MAKE_CMD="$MAKE_CMD -j6"

function usage()
{
    echo "Build an xtrabackup binary against the specified InnoDB flavor."
    echo
    echo "Usage: `basename $0` CODEBASE"
    echo "where CODEBASE can be one of the following values or aliases:"
    echo "  innodb56 | 5.6    build against InnoDB in MySQL 5.6"
    exit -1
}

################################################################################
# Invoke 'make' in the specified directories
################################################################################
function make_dirs()
{
    for d in $*
    do
        $MAKE_CMD -C $d
    done
}

# Configure the bundled server sources and build libmysqld.
# Relies on $server_dir, $configure_cmd and $top_dir set by the caller.
function build_server()
{
    # Bug fix: this was "local $type=$1", which expands $type (normally empty)
    # instead of declaring the variable, producing "local =<arg>".
    local type=$1
    echo "Configuring the server"
    cd $server_dir
    echo eval $configure_cmd
    eval $configure_cmd

    echo "Creating generated files"
    $MAKE_CMD -C sql GenDigestServerSource

    echo "Building the server"
    make_dirs libmysqld
    cd $top_dir
}

# Build the bundled libarchive with all optional features/deps disabled.
function build_libarchive()
{
    echo "Building libarchive"
    cd $top_dir/src/libarchive

    ${CMAKE} . \
        -DCMAKE_BUILD_TYPE="$BUILD_TYPE" \
        -DCMAKE_AR="$AR" \
        -DCMAKE_LINKER="$LD" \
        -DCMAKE_RANLIB="$RANLIB" \
        -DCMAKE_C_COMPILER="$CC" \
        -DCMAKE_CXX_COMPILER="$CXX" \
        -DCMAKE_C_FLAGS="$CFLAGS" \
        -DCMAKE_CXX_FLAGS="$CXXFLAGS" \
        -DCMAKE_DISABLE_FIND_PACKAGE_BZip2=TRUE \
        -DCMAKE_DISABLE_FIND_PACKAGE_LZMA=TRUE \
        -DCMAKE_DISABLE_FIND_PACKAGE_LibXml2=TRUE \
        -DCMAKE_DISABLE_FIND_PACKAGE_EXPAT=TRUE \
        -DENABLE_CPIO=OFF \
        -DENABLE_OPENSSL=OFF \
        -DENABLE_TAR=OFF \
        -DENABLE_TEST=OFF
    $MAKE_CMD || exit -1
}

# Build the xtrabackup binary itself (depends on a previously built server).
function build_xtrabackup()
{
    build_libarchive
    echo "Building XtraBackup"

    # Read XTRABACKUP_VERSION from the VERSION file
    . $top_dir/VERSION
    cd $top_dir/src
    if [ "`uname -s`" = "Linux" ]
    then
        export LIBS="$LIBS -lrt"
    fi

    $MAKE_CMD MYSQL_ROOT_DIR=$server_dir clean
    $MAKE_CMD MYSQL_ROOT_DIR=$server_dir XTRABACKUP_VERSION=$XTRABACKUP_VERSION $xtrabackup_target
    cd $top_dir
}

################################################################################
# Do all steps to build the server, xtrabackup and xbstream
# Expects the following variables to be set before calling:
#   mysql_version       version string (e.g. "5.1.53")
#   server_patch        name of the patch to apply to server source before
#                       building (e.g. "xtradb51.patch")
#   innodb_name         either "innobase" or "innodb_plugin"
#   configure_cmd       server configure command
#   xtrabackup_target   'make' target to build in the xtrabackup build directory
#
################################################################################
function build_all()
{
    local type=$1
    mkdir -p $server_dir
    build_server $type
    build_xtrabackup
}

if ! test -f src/xtrabackup.cc
then
    echo "`basename $0` must be run from the directory with XtraBackup sources"
    usage
fi

type=$1
top_dir=`pwd`

case "$type" in
"innodb56" | "5.6")
    mysql_version=$MYSQL_56_VERSION
    server_patch=innodb56.patch
    innodb_name=innobase
    xtrabackup_target=5.6
    mysql_version_short=${mysql_version:0:3}
    server_dir=$top_dir/mysql-$mysql_version_short
    configure_cmd="${CMAKE} $SRC_DIR \
        -DWITH_MYSQLD_LDFLAGS='$MYSQLD_LDFLAGS' \
        -DWITH_INNOBASE_STORAGE_ENGINE=ON \
        -DWITH_PERFSCHEMA_STORAGE_ENGINE=ON \
        -DMYSQL_DATADIR="/var/lib/mysql" \
        -DMYSQL_UNIX_ADDR="/var/lib/mysql/mysql.sock" \
        -DBUILD_CONFIG=mysql_release \
        -DMYSQL_USER="mysql" \
        -DWITH_FAST_MUTEXES=1 \
        -DWITH_EXTRA_CHARSETS=all \
        -DWITH_EMBEDDED_SERVER=1 \
        -DMYSQL_MAINTAINER_MODE=1 \
        -DMYSQL_ROOT_DIR=$server_dir \
        -DCMAKE_AR=$AR \
        -DCMAKE_LINKER=$LD \
        -DCMAKE_RANLIB=$RANLIB \
        -DCMAKE_BUILD_TYPE=$BUILD_TYPE"
    # Optional extras, appended only when the corresponding variable is set.
    if [ -n "$CMAKE_PREFIX_PATH" ]; then
        configure_cmd+=" -DCMAKE_PREFIX_PATH=$CMAKE_PREFIX_PATH"
    fi
    if [ -n "$CURSES_LIBRARY" ]; then
        configure_cmd+=" -DCURSES_LIBRARY=$CURSES_LIBRARY"
    fi
    if [ -n "$CURSES_INCLUDE_PATH" ]; then
        configure_cmd+=" -DCURSES_INCLUDE_PATH=$CURSES_INCLUDE_PATH"
    fi
    if [ -n "$KRB_PATH" ]; then
        configure_cmd+=" -DWITH_KRB=$KRB_PATH"
    fi
    if [ -n "$SSL_PATH" ]; then
        configure_cmd+=" -DWITH_SSL=$SSL_PATH"
    fi
    if [ -n "$ZLIB_PATH" ]; then
        configure_cmd+=" -DWITH_ZLIB=$ZLIB_PATH"
    else
        # Bug fix: this branch appended to a misspelled "congigure_cmd", so
        # the bundled zlib was silently never selected.
        configure_cmd+=" -DWITH_ZLIB=bundled"
    fi
    if [ -n "$GLIBC_PATH" ]; then
        configure_cmd+=" -DWITH_GLIBC=$GLIBC_PATH"
    fi

    build_all $type
    ;;
*)
    usage
    ;;
esac
MySQLOnRocksDB/mysql-5.6
xtrabackup/utils/build.sh
Shell
gpl-2.0
7,100
/* * Copyright (C) 2008-2019 TrinityCore <https://www.trinitycore.org/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ /* * Scripts for spells with SPELLFAMILY_PRIEST and SPELLFAMILY_GENERIC spells used by priest players. * Ordered alphabetically using scriptname. * Scriptnames of files in this file should be prefixed with "spell_pri_". */ #include "ScriptMgr.h" #include "AreaTriggerAI.h" #include "GridNotifiers.h" #include "ObjectAccessor.h" #include "Player.h" #include "SpellAuraEffects.h" #include "SpellMgr.h" #include "SpellScript.h" enum PriestSpells { SPELL_PRIEST_ABSOLUTION = 33167, SPELL_PRIEST_ANGELIC_FEATHER_AREATRIGGER = 158624, SPELL_PRIEST_ANGELIC_FEATHER_AURA = 121557, SPELL_PRIEST_ANGELIC_FEATHER_TRIGGER = 121536, SPELL_PRIEST_ARMOR_OF_FAITH = 28810, SPELL_PRIEST_ATONEMENT = 81749, SPELL_PRIEST_ATONEMENT_HEAL = 81751, SPELL_PRIEST_ATONEMENT_TRIGGERED = 194384, SPELL_PRIEST_BLESSED_HEALING = 70772, SPELL_PRIEST_BODY_AND_SOUL = 64129, SPELL_PRIEST_BODY_AND_SOUL_DISPEL = 64136, SPELL_PRIEST_BODY_AND_SOUL_SPEED = 65081, SPELL_PRIEST_CURE_DISEASE = 528, SPELL_PRIEST_DISPEL_MAGIC_FRIENDLY = 97690, SPELL_PRIEST_DISPEL_MAGIC_HOSTILE = 97691, SPELL_PRIEST_DIVINE_AEGIS = 47753, SPELL_PRIEST_DIVINE_BLESSING = 40440, SPELL_PRIEST_DIVINE_WRATH = 40441, SPELL_PRIEST_GLYPH_OF_CIRCLE_OF_HEALING = 55675, SPELL_PRIEST_GLYPH_OF_DISPEL_MAGIC = 55677, SPELL_PRIEST_GLYPH_OF_DISPEL_MAGIC_HEAL = 
56131, SPELL_PRIEST_GLYPH_OF_LIGHTWELL = 55673, SPELL_PRIEST_GLYPH_OF_PRAYER_OF_HEALING_HEAL = 56161, SPELL_PRIEST_GLYPH_OF_SHADOW = 107906, SPELL_PRIEST_GUARDIAN_SPIRIT_HEAL = 48153, SPELL_PRIEST_ITEM_EFFICIENCY = 37595, SPELL_PRIEST_LEAP_OF_FAITH = 73325, SPELL_PRIEST_LEAP_OF_FAITH_EFFECT = 92832, SPELL_PRIEST_LEAP_OF_FAITH_EFFECT_TRIGGER = 92833, SPELL_PRIEST_LEAP_OF_FAITH_TRIGGERED = 92572, SPELL_PRIEST_LEVITATE_EFFECT = 111759, SPELL_PRIEST_MANA_LEECH_PROC = 34650, SPELL_PRIEST_ORACULAR_HEAL = 26170, SPELL_PRIEST_PENANCE_R1 = 47540, SPELL_PRIEST_PENANCE_R1_DAMAGE = 47758, SPELL_PRIEST_PENANCE_R1_HEAL = 47757, SPELL_PRIEST_REFLECTIVE_SHIELD_R1 = 33201, SPELL_PRIEST_REFLECTIVE_SHIELD_TRIGGERED = 33619, SPELL_PRIEST_RENEWED_HOPE = 197469, SPELL_PRIEST_RENEWED_HOPE_EFFECT = 197470, SPELL_PRIEST_SHADOWFORM_VISUAL_WITH_GLYPH = 107904, SPELL_PRIEST_SHADOWFORM_VISUAL_WITHOUT_GLYPH = 107903, SPELL_PRIEST_SHIELD_DISCIPLINE_ENERGIZE = 47755, SPELL_PRIEST_SHIELD_DISCIPLINE_PASSIVE = 197045, SPELL_PRIEST_SPIRIT_OF_REDEMPTION = 27827, SPELL_PRIEST_STRENGTH_OF_SOUL = 197535, SPELL_PRIEST_STRENGTH_OF_SOUL_EFFECT = 197548, SPELL_PRIEST_T9_HEALING_2P = 67201, SPELL_PRIEST_THE_PENITENT_AURA = 200347, SPELL_PRIEST_TWIN_DISCIPLINES_RANK_1 = 47586, SPELL_PRIEST_VAMPIRIC_EMBRACE_HEAL = 15290, SPELL_PRIEST_VAMPIRIC_TOUCH_DISPEL = 64085, SPELL_PRIEST_VOID_SHIELD = 199144, SPELL_PRIEST_VOID_SHIELD_EFFECT = 199145 }; enum MiscSpells { SPELL_GEN_REPLENISHMENT = 57669 }; class PowerCheck { public: explicit PowerCheck(Powers const power) : _power(power) { } bool operator()(WorldObject* obj) const { if (Unit* target = obj->ToUnit()) return target->GetPowerType() != _power; return true; } private: Powers const _power; }; class RaidCheck { public: explicit RaidCheck(Unit const* caster) : _caster(caster) { } bool operator()(WorldObject* obj) const { if (Unit* target = obj->ToUnit()) return !_caster->IsInRaidWith(target); return true; } private: Unit const* _caster; }; // 26169 - Oracle Healing 
Bonus class spell_pri_aq_3p_bonus : public SpellScriptLoader { public: spell_pri_aq_3p_bonus() : SpellScriptLoader("spell_pri_aq_3p_bonus") { } class spell_pri_aq_3p_bonus_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_aq_3p_bonus_AuraScript); bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_ORACULAR_HEAL }); } void HandleProc(AuraEffect const* aurEff, ProcEventInfo& eventInfo) { PreventDefaultAction(); Unit* caster = eventInfo.GetActor(); if (caster == eventInfo.GetProcTarget()) return; HealInfo* healInfo = eventInfo.GetHealInfo(); if (!healInfo || !healInfo->GetHeal()) return; int32 amount = CalculatePct(static_cast<int32>(healInfo->GetHeal()), 10); caster->CastCustomSpell(SPELL_PRIEST_ORACULAR_HEAL, SPELLVALUE_BASE_POINT0, amount, caster, true, nullptr, aurEff); } void Register() override { OnEffectProc += AuraEffectProcFn(spell_pri_aq_3p_bonus_AuraScript::HandleProc, EFFECT_0, SPELL_AURA_DUMMY); } }; AuraScript* GetAuraScript() const override { return new spell_pri_aq_3p_bonus_AuraScript(); } }; // 81749 - Atonement class spell_pri_atonement : public SpellScriptLoader { public: static char constexpr const ScriptName[] = "spell_pri_atonement"; spell_pri_atonement() : SpellScriptLoader(ScriptName) { } class spell_pri_atonement_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_atonement_AuraScript); bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_ATONEMENT_HEAL }); } bool CheckProc(ProcEventInfo& eventInfo) { return eventInfo.GetDamageInfo() != nullptr; } void HandleProc(AuraEffect const* aurEff, ProcEventInfo& eventInfo) { DamageInfo* damageInfo = eventInfo.GetDamageInfo(); int32 heal = CalculatePct(damageInfo->GetDamage(), aurEff->GetAmount()); _appliedAtonements.erase(std::remove_if(_appliedAtonements.begin(), _appliedAtonements.end(), [this, heal](ObjectGuid const& targetGuid) { if (Unit* target = ObjectAccessor::GetUnit(*GetTarget(), 
targetGuid)) { if (target->GetExactDist(GetTarget()) < GetSpellInfo()->GetEffect(EFFECT_1)->CalcValue()) GetTarget()->CastCustomSpell(SPELL_PRIEST_ATONEMENT_HEAL, SPELLVALUE_BASE_POINT0, heal, target, true); return false; } return true; }), _appliedAtonements.end()); } void Register() override { DoCheckProc += AuraCheckProcFn(spell_pri_atonement_AuraScript::CheckProc); OnEffectProc += AuraEffectProcFn(spell_pri_atonement_AuraScript::HandleProc, EFFECT_0, SPELL_AURA_DUMMY); } std::vector<ObjectGuid> _appliedAtonements; public: void AddAtonementTarget(ObjectGuid const& target) { _appliedAtonements.push_back(target); } void RemoveAtonementTarget(ObjectGuid const& target) { _appliedAtonements.erase(std::remove(_appliedAtonements.begin(), _appliedAtonements.end(), target), _appliedAtonements.end()); } }; AuraScript* GetAuraScript() const override { return new spell_pri_atonement_AuraScript(); } }; char constexpr const spell_pri_atonement::ScriptName[]; // 194384 - Atonement class spell_pri_atonement_triggered : public SpellScriptLoader { public: spell_pri_atonement_triggered() : SpellScriptLoader("spell_pri_atonement_triggered") { } class spell_pri_atonement_triggered_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_atonement_triggered_AuraScript); using AtonementScript = spell_pri_atonement::spell_pri_atonement_AuraScript; bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_ATONEMENT }); } void HandleOnApply(AuraEffect const* /*aurEff*/, AuraEffectHandleModes /*mode*/) { RegisterHelper<&AtonementScript::AddAtonementTarget>(); } void HandleOnRemove(AuraEffect const* /*aurEff*/, AuraEffectHandleModes /*mode*/) { RegisterHelper<&AtonementScript::RemoveAtonementTarget>(); } template<void(AtonementScript::*func)(ObjectGuid const&)> void RegisterHelper() { if (Unit* caster = GetCaster()) if (Aura* atonement = caster->GetAura(SPELL_PRIEST_ATONEMENT)) if (AtonementScript* script = 
atonement->GetScript<AtonementScript>(spell_pri_atonement::ScriptName)) (script->*func)(GetTarget()->GetGUID()); } void Register() override { OnEffectApply += AuraEffectApplyFn(spell_pri_atonement_triggered_AuraScript::HandleOnApply, EFFECT_0, SPELL_AURA_DUMMY, AURA_EFFECT_HANDLE_REAL); OnEffectRemove += AuraEffectRemoveFn(spell_pri_atonement_triggered_AuraScript::HandleOnRemove, EFFECT_0, SPELL_AURA_DUMMY, AURA_EFFECT_HANDLE_REAL); } }; AuraScript* GetAuraScript() const override { return new spell_pri_atonement_triggered_AuraScript(); } }; // 64129 - Body and Soul class spell_pri_body_and_soul : public SpellScriptLoader { public: spell_pri_body_and_soul() : SpellScriptLoader("spell_pri_body_and_soul") { } class spell_pri_body_and_soul_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_body_and_soul_AuraScript); bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_CURE_DISEASE, SPELL_PRIEST_BODY_AND_SOUL_DISPEL }); } void HandleEffectSpeedProc(AuraEffect const* aurEff, ProcEventInfo& eventInfo) { PreventDefaultAction(); // Proc only with Power Word: Shield or Leap of Faith if (!(eventInfo.GetDamageInfo()->GetSpellInfo()->SpellFamilyFlags[0] & 0x1 || eventInfo.GetDamageInfo()->GetSpellInfo()->SpellFamilyFlags[2] & 0x80000)) return; GetTarget()->CastCustomSpell(SPELL_PRIEST_BODY_AND_SOUL_SPEED, SPELLVALUE_BASE_POINT0, aurEff->GetAmount(), eventInfo.GetProcTarget(), true, NULL, aurEff); } void HandleEffectDispelProc(AuraEffect const* aurEff, ProcEventInfo& eventInfo) { PreventDefaultAction(); // Proc only with Cure Disease if (eventInfo.GetDamageInfo()->GetSpellInfo()->Id != SPELL_PRIEST_CURE_DISEASE || eventInfo.GetProcTarget() != GetTarget()) return; if (roll_chance_i(aurEff->GetAmount())) GetTarget()->CastSpell(eventInfo.GetProcTarget(), SPELL_PRIEST_BODY_AND_SOUL_DISPEL, true, NULL, aurEff); } void Register() override { OnEffectProc += AuraEffectProcFn(spell_pri_body_and_soul_AuraScript::HandleEffectSpeedProc, 
EFFECT_0, SPELL_AURA_DUMMY); OnEffectProc += AuraEffectProcFn(spell_pri_body_and_soul_AuraScript::HandleEffectDispelProc, EFFECT_1, SPELL_AURA_DUMMY); } }; AuraScript* GetAuraScript() const override { return new spell_pri_body_and_soul_AuraScript(); } }; // 34861 - Circle of Healing class spell_pri_circle_of_healing : public SpellScriptLoader { public: spell_pri_circle_of_healing() : SpellScriptLoader("spell_pri_circle_of_healing") { } class spell_pri_circle_of_healing_SpellScript : public SpellScript { PrepareSpellScript(spell_pri_circle_of_healing_SpellScript); bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_GLYPH_OF_CIRCLE_OF_HEALING }); } void FilterTargets(std::list<WorldObject*>& targets) { targets.remove_if(RaidCheck(GetCaster())); uint32 const maxTargets = GetCaster()->HasAura(SPELL_PRIEST_GLYPH_OF_CIRCLE_OF_HEALING) ? 6 : 5; // Glyph of Circle of Healing if (targets.size() > maxTargets) { targets.sort(Trinity::HealthPctOrderPred()); targets.resize(maxTargets); } } void Register() override { OnObjectAreaTargetSelect += SpellObjectAreaTargetSelectFn(spell_pri_circle_of_healing_SpellScript::FilterTargets, EFFECT_0, TARGET_UNIT_DEST_AREA_ALLY); } }; SpellScript* GetSpellScript() const override { return new spell_pri_circle_of_healing_SpellScript(); } }; // 527 - Dispel magic class spell_pri_dispel_magic : public SpellScriptLoader { public: spell_pri_dispel_magic() : SpellScriptLoader("spell_pri_dispel_magic") { } class spell_pri_dispel_magic_SpellScript : public SpellScript { PrepareSpellScript(spell_pri_dispel_magic_SpellScript); bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo( { SPELL_PRIEST_ABSOLUTION, SPELL_PRIEST_GLYPH_OF_DISPEL_MAGIC_HEAL, SPELL_PRIEST_GLYPH_OF_DISPEL_MAGIC }); } SpellCastResult CheckCast() { Unit* caster = GetCaster(); Unit* target = GetExplTargetUnit(); if (!target || (!caster->HasAura(SPELL_PRIEST_ABSOLUTION) && caster != target && 
target->IsFriendlyTo(caster))) return SPELL_FAILED_BAD_TARGETS; return SPELL_CAST_OK; } void AfterEffectHit(SpellEffIndex /*effIndex*/) { if (GetHitUnit()->IsFriendlyTo(GetCaster())) { GetCaster()->CastSpell(GetHitUnit(), SPELL_PRIEST_DISPEL_MAGIC_FRIENDLY, true); if (AuraEffect const* aurEff = GetHitUnit()->GetAuraEffect(SPELL_PRIEST_GLYPH_OF_DISPEL_MAGIC, EFFECT_0)) { int32 heal = GetHitUnit()->CountPctFromMaxHealth(aurEff->GetAmount()); GetCaster()->CastCustomSpell(SPELL_PRIEST_GLYPH_OF_DISPEL_MAGIC_HEAL, SPELLVALUE_BASE_POINT0, heal, GetHitUnit()); } } else GetCaster()->CastSpell(GetHitUnit(), SPELL_PRIEST_DISPEL_MAGIC_HOSTILE, true); } void Register() override { OnCheckCast += SpellCheckCastFn(spell_pri_dispel_magic_SpellScript::CheckCast); OnEffectHitTarget += SpellEffectFn(spell_pri_dispel_magic_SpellScript::AfterEffectHit, EFFECT_0, SPELL_EFFECT_DUMMY); } }; SpellScript* GetSpellScript() const override { return new spell_pri_dispel_magic_SpellScript(); } }; // -47509 - Divine Aegis class spell_pri_divine_aegis : public SpellScriptLoader { public: spell_pri_divine_aegis() : SpellScriptLoader("spell_pri_divine_aegis") { } class spell_pri_divine_aegis_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_divine_aegis_AuraScript); bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_DIVINE_AEGIS }); } bool CheckProc(ProcEventInfo& eventInfo) { return eventInfo.GetProcTarget() != nullptr; } void HandleProc(AuraEffect const* aurEff, ProcEventInfo& eventInfo) { PreventDefaultAction(); HealInfo* healInfo = eventInfo.GetHealInfo(); if (!healInfo || !healInfo->GetHeal()) return; int32 absorb = CalculatePct(healInfo->GetHeal(), aurEff->GetAmount()); // Multiple effects stack, so let's try to find this aura. 
if (AuraEffect const* aegis = eventInfo.GetProcTarget()->GetAuraEffect(SPELL_PRIEST_DIVINE_AEGIS, EFFECT_0)) absorb += aegis->GetAmount(); absorb = std::min(absorb, eventInfo.GetProcTarget()->getLevel() * 125); GetTarget()->CastCustomSpell(SPELL_PRIEST_DIVINE_AEGIS, SPELLVALUE_BASE_POINT0, absorb, eventInfo.GetProcTarget(), true, nullptr, aurEff); } void Register() override { DoCheckProc += AuraCheckProcFn(spell_pri_divine_aegis_AuraScript::CheckProc); OnEffectProc += AuraEffectProcFn(spell_pri_divine_aegis_AuraScript::HandleProc, EFFECT_0, SPELL_AURA_DUMMY); } }; AuraScript* GetAuraScript() const override { return new spell_pri_divine_aegis_AuraScript(); } }; // 64844 - Divine Hymn class spell_pri_divine_hymn : public SpellScriptLoader { public: spell_pri_divine_hymn() : SpellScriptLoader("spell_pri_divine_hymn") { } class spell_pri_divine_hymn_SpellScript : public SpellScript { PrepareSpellScript(spell_pri_divine_hymn_SpellScript); void FilterTargets(std::list<WorldObject*>& targets) { targets.remove_if(RaidCheck(GetCaster())); uint32 const maxTargets = 3; if (targets.size() > maxTargets) { targets.sort(Trinity::HealthPctOrderPred()); targets.resize(maxTargets); } } void Register() override { OnObjectAreaTargetSelect += SpellObjectAreaTargetSelectFn(spell_pri_divine_hymn_SpellScript::FilterTargets, EFFECT_ALL, TARGET_UNIT_SRC_AREA_ALLY); } }; SpellScript* GetSpellScript() const override { return new spell_pri_divine_hymn_SpellScript(); } }; // 55680 - Glyph of Prayer of Healing class spell_pri_glyph_of_prayer_of_healing : public SpellScriptLoader { public: spell_pri_glyph_of_prayer_of_healing() : SpellScriptLoader("spell_pri_glyph_of_prayer_of_healing") { } class spell_pri_glyph_of_prayer_of_healing_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_glyph_of_prayer_of_healing_AuraScript); bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_GLYPH_OF_PRAYER_OF_HEALING_HEAL }); } void HandleProc(AuraEffect 
const* aurEff, ProcEventInfo& eventInfo) { PreventDefaultAction(); HealInfo* healInfo = eventInfo.GetHealInfo(); if (!healInfo || !healInfo->GetHeal()) return; SpellInfo const* triggeredSpellInfo = sSpellMgr->AssertSpellInfo(SPELL_PRIEST_GLYPH_OF_PRAYER_OF_HEALING_HEAL); int32 heal = int32(CalculatePct(healInfo->GetHeal(), aurEff->GetAmount()) / triggeredSpellInfo->GetMaxTicks(DIFFICULTY_NONE)); GetTarget()->CastCustomSpell(SPELL_PRIEST_GLYPH_OF_PRAYER_OF_HEALING_HEAL, SPELLVALUE_BASE_POINT0, heal, eventInfo.GetProcTarget(), true, NULL, aurEff); } void Register() override { OnEffectProc += AuraEffectProcFn(spell_pri_glyph_of_prayer_of_healing_AuraScript::HandleProc, EFFECT_0, SPELL_AURA_DUMMY); } }; AuraScript* GetAuraScript() const override { return new spell_pri_glyph_of_prayer_of_healing_AuraScript(); } }; // 24191 - Improved Power Word Shield class spell_pri_improved_power_word_shield : public SpellScriptLoader { public: spell_pri_improved_power_word_shield() : SpellScriptLoader("spell_pri_improved_power_word_shield") { } class spell_pri_improved_power_word_shield_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_improved_power_word_shield_AuraScript); void HandleEffectCalcSpellMod(AuraEffect const* aurEff, SpellModifier*& spellMod) { if (!spellMod) { spellMod = new SpellModifier(GetAura()); spellMod->op = SpellModOp(aurEff->GetMiscValue()); spellMod->type = SPELLMOD_PCT; spellMod->spellId = GetId(); spellMod->mask = GetSpellInfo()->GetEffect(aurEff->GetEffIndex())->SpellClassMask; } spellMod->value = aurEff->GetAmount(); } void Register() override { DoEffectCalcSpellMod += AuraEffectCalcSpellModFn(spell_pri_improved_power_word_shield_AuraScript::HandleEffectCalcSpellMod, EFFECT_0, SPELL_AURA_DUMMY); } }; AuraScript* GetAuraScript() const override { return new spell_pri_improved_power_word_shield_AuraScript(); } }; // 47788 - Guardian Spirit class spell_pri_guardian_spirit : public SpellScriptLoader { public: spell_pri_guardian_spirit() : 
SpellScriptLoader("spell_pri_guardian_spirit") { } class spell_pri_guardian_spirit_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_guardian_spirit_AuraScript); public: spell_pri_guardian_spirit_AuraScript() { healPct = 0; } private: uint32 healPct; bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_GUARDIAN_SPIRIT_HEAL }); } bool Load() override { healPct = GetSpellInfo()->GetEffect(EFFECT_1)->CalcValue(); return true; } void CalculateAmount(AuraEffect const* /*aurEff*/, int32 & amount, bool & /*canBeRecalculated*/) { // Set absorbtion amount to unlimited amount = -1; } void Absorb(AuraEffect* /*aurEff*/, DamageInfo & dmgInfo, uint32 & absorbAmount) { Unit* target = GetTarget(); if (dmgInfo.GetDamage() < target->GetHealth()) return; int32 healAmount = int32(target->CountPctFromMaxHealth(healPct)); // remove the aura now, we don't want 40% healing bonus Remove(AURA_REMOVE_BY_ENEMY_SPELL); target->CastCustomSpell(target, SPELL_PRIEST_GUARDIAN_SPIRIT_HEAL, &healAmount, NULL, NULL, true); absorbAmount = dmgInfo.GetDamage(); } void Register() override { DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_pri_guardian_spirit_AuraScript::CalculateAmount, EFFECT_1, SPELL_AURA_SCHOOL_ABSORB); OnEffectAbsorb += AuraEffectAbsorbFn(spell_pri_guardian_spirit_AuraScript::Absorb, EFFECT_1); } }; AuraScript* GetAuraScript() const override { return new spell_pri_guardian_spirit_AuraScript(); } }; // 64904 - Hymn of Hope class spell_pri_hymn_of_hope : public SpellScriptLoader { public: spell_pri_hymn_of_hope() : SpellScriptLoader("spell_pri_hymn_of_hope") { } class spell_pri_hymn_of_hope_SpellScript : public SpellScript { PrepareSpellScript(spell_pri_hymn_of_hope_SpellScript); void FilterTargets(std::list<WorldObject*>& targets) { targets.remove_if(PowerCheck(POWER_MANA)); targets.remove_if(RaidCheck(GetCaster())); uint32 const maxTargets = 3; if (targets.size() > maxTargets) { 
targets.sort(Trinity::PowerPctOrderPred(POWER_MANA)); targets.resize(maxTargets); } } void Register() override { OnObjectAreaTargetSelect += SpellObjectAreaTargetSelectFn(spell_pri_hymn_of_hope_SpellScript::FilterTargets, EFFECT_ALL, TARGET_UNIT_SRC_AREA_ALLY); } }; SpellScript* GetSpellScript() const override { return new spell_pri_hymn_of_hope_SpellScript(); } }; // 40438 - Priest Tier 6 Trinket class spell_pri_item_t6_trinket : public SpellScriptLoader { public: spell_pri_item_t6_trinket() : SpellScriptLoader("spell_pri_item_t6_trinket") { } class spell_pri_item_t6_trinket_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_item_t6_trinket_AuraScript); bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_DIVINE_BLESSING, SPELL_PRIEST_DIVINE_WRATH }); } void HandleProc(AuraEffect const* /*aurEff*/, ProcEventInfo& eventInfo) { PreventDefaultAction(); Unit* caster = eventInfo.GetActor(); if (eventInfo.GetSpellTypeMask() & PROC_SPELL_TYPE_HEAL) caster->CastSpell((Unit*)nullptr, SPELL_PRIEST_DIVINE_BLESSING, true); if (eventInfo.GetSpellTypeMask() & PROC_SPELL_TYPE_DAMAGE) caster->CastSpell((Unit*)nullptr, SPELL_PRIEST_DIVINE_WRATH, true); } void Register() override { OnEffectProc += AuraEffectProcFn(spell_pri_item_t6_trinket_AuraScript::HandleProc, EFFECT_0, SPELL_AURA_DUMMY); } }; AuraScript* GetAuraScript() const override { return new spell_pri_item_t6_trinket_AuraScript(); } }; // 92833 - Leap of Faith class spell_pri_leap_of_faith_effect_trigger : public SpellScriptLoader { public: spell_pri_leap_of_faith_effect_trigger() : SpellScriptLoader("spell_pri_leap_of_faith_effect_trigger") { } class spell_pri_leap_of_faith_effect_trigger_SpellScript : public SpellScript { PrepareSpellScript(spell_pri_leap_of_faith_effect_trigger_SpellScript); bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_LEAP_OF_FAITH_EFFECT }); } void HandleEffectDummy(SpellEffIndex /*effIndex*/) { 
Position destPos = GetHitDest()->GetPosition(); SpellCastTargets targets; targets.SetDst(destPos); targets.SetUnitTarget(GetCaster()); GetHitUnit()->CastSpell(targets, sSpellMgr->GetSpellInfo(GetEffectValue()), NULL); } void Register() override { OnEffectHitTarget += SpellEffectFn(spell_pri_leap_of_faith_effect_trigger_SpellScript::HandleEffectDummy, EFFECT_0, SPELL_EFFECT_DUMMY); } }; SpellScript* GetSpellScript() const override { return new spell_pri_leap_of_faith_effect_trigger_SpellScript(); } }; // 1706 - Levitate class spell_pri_levitate : public SpellScriptLoader { public: spell_pri_levitate() : SpellScriptLoader("spell_pri_levitate") { } class spell_pri_levitate_SpellScript : public SpellScript { PrepareSpellScript(spell_pri_levitate_SpellScript); bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_LEVITATE_EFFECT }); } void HandleDummy(SpellEffIndex /*effIndex*/) { GetCaster()->CastSpell(GetHitUnit(), SPELL_PRIEST_LEVITATE_EFFECT, true); } void Register() override { OnEffectHitTarget += SpellEffectFn(spell_pri_levitate_SpellScript::HandleDummy, EFFECT_0, SPELL_EFFECT_DUMMY); } }; SpellScript* GetSpellScript() const { return new spell_pri_levitate_SpellScript; } }; // 7001 - Lightwell Renew class spell_pri_lightwell_renew : public SpellScriptLoader { public: spell_pri_lightwell_renew() : SpellScriptLoader("spell_pri_lightwell_renew") { } class spell_pri_lightwell_renew_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_lightwell_renew_AuraScript); void CalculateAmount(AuraEffect const* /*aurEff*/, int32& amount, bool& /*canBeRecalculated*/) { if (Unit* caster = GetCaster()) { // Bonus from Glyph of Lightwell if (AuraEffect* modHealing = caster->GetAuraEffect(SPELL_PRIEST_GLYPH_OF_LIGHTWELL, EFFECT_0)) AddPct(amount, modHealing->GetAmount()); } } void Register() override { DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_pri_lightwell_renew_AuraScript::CalculateAmount, EFFECT_0, 
SPELL_AURA_PERIODIC_HEAL); } }; AuraScript* GetAuraScript() const override { return new spell_pri_lightwell_renew_AuraScript(); } }; // 8129 - Mana Burn class spell_pri_mana_burn : public SpellScriptLoader { public: spell_pri_mana_burn() : SpellScriptLoader("spell_pri_mana_burn") { } class spell_pri_mana_burn_SpellScript : public SpellScript { PrepareSpellScript(spell_pri_mana_burn_SpellScript); void HandleAfterHit() { if (Unit* unitTarget = GetHitUnit()) unitTarget->RemoveAurasWithMechanic((1 << MECHANIC_FEAR) | (1 << MECHANIC_POLYMORPH)); } void Register() override { AfterHit += SpellHitFn(spell_pri_mana_burn_SpellScript::HandleAfterHit); } }; SpellScript* GetSpellScript() const override { return new spell_pri_mana_burn_SpellScript; } }; // 28305 - Mana Leech (Passive) (Priest Pet Aura) class spell_pri_mana_leech : public SpellScriptLoader { public: spell_pri_mana_leech() : SpellScriptLoader("spell_pri_mana_leech") { } class spell_pri_mana_leech_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_mana_leech_AuraScript); public: spell_pri_mana_leech_AuraScript() { _procTarget = nullptr; } private: bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_MANA_LEECH_PROC }); } bool CheckProc(ProcEventInfo& /*eventInfo*/) { _procTarget = GetTarget()->GetOwner(); return _procTarget != nullptr; } void HandleProc(AuraEffect const* aurEff, ProcEventInfo& /*eventInfo*/) { PreventDefaultAction(); GetTarget()->CastSpell(_procTarget, SPELL_PRIEST_MANA_LEECH_PROC, true, NULL, aurEff); } void Register() override { DoCheckProc += AuraCheckProcFn(spell_pri_mana_leech_AuraScript::CheckProc); OnEffectProc += AuraEffectProcFn(spell_pri_mana_leech_AuraScript::HandleProc, EFFECT_0, SPELL_AURA_DUMMY); } private: Unit* _procTarget; }; AuraScript* GetAuraScript() const override { return new spell_pri_mana_leech_AuraScript(); } }; // 47948 - Pain and Suffering (Proc) class spell_pri_pain_and_suffering_proc : public SpellScriptLoader { 
public: spell_pri_pain_and_suffering_proc() : SpellScriptLoader("spell_pri_pain_and_suffering_proc") { } class spell_pri_pain_and_suffering_proc_SpellScript : public SpellScript { PrepareSpellScript(spell_pri_pain_and_suffering_proc_SpellScript); void HandleEffectScriptEffect(SpellEffIndex /*effIndex*/) { Unit* caster = GetCaster(); // Refresh Shadow Word: Pain on target if (Unit* target = GetHitUnit()) if (AuraEffect* aur = target->GetAuraEffect(SPELL_AURA_PERIODIC_DAMAGE, SPELLFAMILY_PRIEST, flag128(0x8000, 0, 0), caster->GetGUID())) { uint32 damage = std::max(aur->GetAmount(), 0); sScriptMgr->ModifyPeriodicDamageAurasTick(target, caster, damage); aur->SetDamage(caster->SpellDamageBonusDone(target, aur->GetSpellInfo(), damage, DOT, aur->GetSpellEffectInfo()) * aur->GetDonePct()); aur->CalculatePeriodic(caster, false, false); aur->GetBase()->RefreshDuration(); } } void Register() override { OnEffectHitTarget += SpellEffectFn(spell_pri_pain_and_suffering_proc_SpellScript::HandleEffectScriptEffect, EFFECT_0, SPELL_EFFECT_SCRIPT_EFFECT); } }; SpellScript* GetSpellScript() const override { return new spell_pri_pain_and_suffering_proc_SpellScript; } }; // 47540 - Penance class spell_pri_penance : public SpellScriptLoader { public: spell_pri_penance() : SpellScriptLoader("spell_pri_penance") { } class spell_pri_penance_SpellScript : public SpellScript { PrepareSpellScript(spell_pri_penance_SpellScript); bool Load() override { return GetCaster()->GetTypeId() == TYPEID_PLAYER; } bool Validate(SpellInfo const* spellInfo) override { SpellInfo const* firstRankSpellInfo = sSpellMgr->GetSpellInfo(SPELL_PRIEST_PENANCE_R1); if (!firstRankSpellInfo) return false; // can't use other spell than this penance due to spell_ranks dependency if (!spellInfo->IsRankOf(firstRankSpellInfo)) return false; uint8 rank = spellInfo->GetRank(); if (!sSpellMgr->GetSpellWithRank(SPELL_PRIEST_PENANCE_R1_DAMAGE, rank, true)) return false; if (!sSpellMgr->GetSpellWithRank(SPELL_PRIEST_PENANCE_R1_HEAL, 
rank, true)) return false; return true; } void HandleDummy(SpellEffIndex /*effIndex*/) { Unit* caster = GetCaster(); if (Unit* target = GetHitUnit()) { if (!target->IsAlive()) return; uint8 rank = GetSpellInfo()->GetRank(); if (caster->IsFriendlyTo(target)) caster->CastSpell(target, sSpellMgr->GetSpellWithRank(SPELL_PRIEST_PENANCE_R1_HEAL, rank), false); else caster->CastSpell(target, sSpellMgr->GetSpellWithRank(SPELL_PRIEST_PENANCE_R1_DAMAGE, rank), false); } } SpellCastResult CheckCast() { Player* caster = GetCaster()->ToPlayer(); if (Unit* target = GetExplTargetUnit()) { if (!caster->IsFriendlyTo(target)) { if (!caster->IsValidAttackTarget(target)) return SPELL_FAILED_BAD_TARGETS; if (!caster->isInFront(target)) return SPELL_FAILED_UNIT_NOT_INFRONT; } else { //Support for modifications of this spell in Legion with The Penitent talent (7.1.5) if(!caster->HasAura(SPELL_PRIEST_THE_PENITENT_AURA)) return SPELL_FAILED_BAD_TARGETS; if (!caster->isInFront(target)) return SPELL_FAILED_UNIT_NOT_INFRONT; } } return SPELL_CAST_OK; } void Register() override { OnEffectHitTarget += SpellEffectFn(spell_pri_penance_SpellScript::HandleDummy, EFFECT_0, SPELL_EFFECT_DUMMY); OnCheckCast += SpellCheckCastFn(spell_pri_penance_SpellScript::CheckCast); } }; SpellScript* GetSpellScript() const override { return new spell_pri_penance_SpellScript; } }; // -47569 - Phantasm class spell_pri_phantasm : public SpellScriptLoader { public: spell_pri_phantasm() : SpellScriptLoader("spell_pri_phantasm") { } class spell_pri_phantasm_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_phantasm_AuraScript); bool CheckProc(ProcEventInfo& /*eventInfo*/) { return roll_chance_i(GetEffect(EFFECT_0)->GetAmount()); } void HandleEffectProc(AuraEffect const* /*aurEff*/, ProcEventInfo& /*eventInfo*/) { PreventDefaultAction(); GetTarget()->RemoveMovementImpairingAuras(); } void Register() override { DoCheckProc += AuraCheckProcFn(spell_pri_phantasm_AuraScript::CheckProc); OnEffectProc += 
AuraEffectProcFn(spell_pri_phantasm_AuraScript::HandleEffectProc, EFFECT_0, SPELL_AURA_DUMMY); } }; AuraScript* GetAuraScript() const override { return new spell_pri_phantasm_AuraScript(); } }; // 17 - Power Word: Shield class spell_pri_power_word_shield : public SpellScriptLoader { public: spell_pri_power_word_shield() : SpellScriptLoader("spell_pri_power_word_shield") { } class spell_pri_power_word_shield_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_power_word_shield_AuraScript); void CalculateAmount(AuraEffect const* /*auraEffect*/, int32& amount, bool& canBeRecalculated) { canBeRecalculated = false; if (Player* player = GetCaster()->ToPlayer()) { int32 playerMastery = player->GetRatingBonusValue(CR_MASTERY); int32 playerSpellPower = player->SpellBaseDamageBonusDone(SPELL_SCHOOL_MASK_HOLY); int32 playerVersatileDamage = player->GetRatingBonusValue(CR_VERSATILITY_DAMAGE_DONE); //Formula taken from SpellWork amount = (int32)((playerSpellPower * 5.5f) + playerMastery) * (1 + playerVersatileDamage); } } void HandleOnApply(AuraEffect const* /*aurEff*/, AuraEffectHandleModes /*mode*/) { Unit* caster = GetCaster(); Unit* target = GetTarget(); if (!caster) return; if (caster->HasAura(SPELL_PRIEST_BODY_AND_SOUL)) caster->CastSpell(target, SPELL_PRIEST_BODY_AND_SOUL_SPEED, true); if (caster->HasAura(SPELL_PRIEST_STRENGTH_OF_SOUL)) caster->CastSpell(target, SPELL_PRIEST_STRENGTH_OF_SOUL_EFFECT, true); if (caster->HasAura(SPELL_PRIEST_RENEWED_HOPE)) caster->CastSpell(target, SPELL_PRIEST_RENEWED_HOPE_EFFECT, true); if (caster->HasAura(SPELL_PRIEST_VOID_SHIELD) && caster == target) caster->CastSpell(target, SPELL_PRIEST_VOID_SHIELD_EFFECT, true); if (caster->HasAura(SPELL_PRIEST_ATONEMENT)) caster->CastSpell(target, SPELL_PRIEST_ATONEMENT_TRIGGERED, true); } void HandleOnRemove(AuraEffect const* /*aurEff*/, AuraEffectHandleModes /*mode*/) { GetTarget()->RemoveAura(SPELL_PRIEST_STRENGTH_OF_SOUL_EFFECT); if (Unit* caster = GetCaster()) if 
(GetTargetApplication()->GetRemoveMode() == AURA_REMOVE_BY_ENEMY_SPELL && caster->HasAura(SPELL_PRIEST_SHIELD_DISCIPLINE_PASSIVE)) caster->CastSpell(caster, SPELL_PRIEST_SHIELD_DISCIPLINE_ENERGIZE, true); } void Register() override { DoEffectCalcAmount += AuraEffectCalcAmountFn(spell_pri_power_word_shield_AuraScript::CalculateAmount, EFFECT_0, SPELL_AURA_SCHOOL_ABSORB); AfterEffectApply += AuraEffectApplyFn(spell_pri_power_word_shield_AuraScript::HandleOnApply, EFFECT_0, SPELL_AURA_SCHOOL_ABSORB, AURA_EFFECT_HANDLE_REAL_OR_REAPPLY_MASK); AfterEffectRemove += AuraEffectRemoveFn(spell_pri_power_word_shield_AuraScript::HandleOnRemove, EFFECT_0, SPELL_AURA_SCHOOL_ABSORB, AURA_EFFECT_HANDLE_REAL); } }; AuraScript* GetAuraScript() const override { return new spell_pri_power_word_shield_AuraScript(); } }; // 33110 - Prayer of Mending Heal class spell_pri_prayer_of_mending_heal : public SpellScriptLoader { public: spell_pri_prayer_of_mending_heal() : SpellScriptLoader("spell_pri_prayer_of_mending_heal") { } class spell_pri_prayer_of_mending_heal_SpellScript : public SpellScript { PrepareSpellScript(spell_pri_prayer_of_mending_heal_SpellScript); void HandleHeal(SpellEffIndex /*effIndex*/) { if (Unit* caster = GetOriginalCaster()) { if (AuraEffect* aurEff = caster->GetAuraEffect(SPELL_PRIEST_T9_HEALING_2P, EFFECT_0)) { int32 heal = GetHitHeal(); AddPct(heal, aurEff->GetAmount()); SetHitHeal(heal); } } } void Register() override { OnEffectHitTarget += SpellEffectFn(spell_pri_prayer_of_mending_heal_SpellScript::HandleHeal, EFFECT_0, SPELL_EFFECT_HEAL); } }; SpellScript* GetSpellScript() const override { return new spell_pri_prayer_of_mending_heal_SpellScript(); } }; // 15473 - Shadowform class spell_pri_shadowform : public SpellScriptLoader { public: spell_pri_shadowform() : SpellScriptLoader("spell_pri_shadowform") { } class spell_pri_shadowform_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_shadowform_AuraScript); bool Validate(SpellInfo const* /*spellInfo*/) 
override { return ValidateSpellInfo({ SPELL_PRIEST_SHADOWFORM_VISUAL_WITHOUT_GLYPH, SPELL_PRIEST_SHADOWFORM_VISUAL_WITH_GLYPH }); } void HandleEffectApply(AuraEffect const* /*aurEff*/, AuraEffectHandleModes /*mode*/) { GetTarget()->CastSpell(GetTarget(), GetTarget()->HasAura(SPELL_PRIEST_GLYPH_OF_SHADOW) ? SPELL_PRIEST_SHADOWFORM_VISUAL_WITH_GLYPH : SPELL_PRIEST_SHADOWFORM_VISUAL_WITHOUT_GLYPH, true); } void HandleEffectRemove(AuraEffect const* /*aurEff*/, AuraEffectHandleModes /*mode*/) { GetTarget()->RemoveAurasDueToSpell(GetTarget()->HasAura(SPELL_PRIEST_GLYPH_OF_SHADOW) ? SPELL_PRIEST_SHADOWFORM_VISUAL_WITH_GLYPH : SPELL_PRIEST_SHADOWFORM_VISUAL_WITHOUT_GLYPH); } void Register() override { AfterEffectApply += AuraEffectApplyFn(spell_pri_shadowform_AuraScript::HandleEffectApply, EFFECT_0, SPELL_AURA_MOD_SHAPESHIFT, AURA_EFFECT_HANDLE_REAL_OR_REAPPLY_MASK); AfterEffectRemove += AuraEffectRemoveFn(spell_pri_shadowform_AuraScript::HandleEffectRemove, EFFECT_0, SPELL_AURA_MOD_SHAPESHIFT, AURA_EFFECT_HANDLE_REAL_OR_REAPPLY_MASK); } }; AuraScript* GetAuraScript() const override { return new spell_pri_shadowform_AuraScript(); } }; // 20711 - Spirit of Redemption class spell_priest_spirit_of_redemption : public AuraScript { PrepareAuraScript(spell_priest_spirit_of_redemption); bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_SPIRIT_OF_REDEMPTION }); } void HandleAbsorb(AuraEffect* aurEff, DamageInfo& dmgInfo, uint32& /*absorbAmount*/) { Unit* target = GetTarget(); if (dmgInfo.GetDamage() >= target->GetHealth()) { target->CastSpell(target, SPELL_PRIEST_SPIRIT_OF_REDEMPTION, TRIGGERED_FULL_MASK, nullptr, aurEff); target->SetFullHealth(); return; } PreventDefaultAction(); } void Register() override { OnEffectAbsorb += AuraEffectAbsorbFn(spell_priest_spirit_of_redemption::HandleAbsorb, EFFECT_0); } }; // 28809 - Greater Heal class spell_pri_t3_4p_bonus : public SpellScriptLoader { public: spell_pri_t3_4p_bonus() : 
SpellScriptLoader("spell_pri_t3_4p_bonus") { } class spell_pri_t3_4p_bonus_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_t3_4p_bonus_AuraScript); bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_ARMOR_OF_FAITH }); } void HandleProc(AuraEffect const* aurEff, ProcEventInfo& eventInfo) { PreventDefaultAction(); eventInfo.GetActor()->CastSpell(eventInfo.GetProcTarget(), SPELL_PRIEST_ARMOR_OF_FAITH, true, nullptr, aurEff); } void Register() override { OnEffectProc += AuraEffectProcFn(spell_pri_t3_4p_bonus_AuraScript::HandleProc, EFFECT_0, SPELL_AURA_DUMMY); } }; AuraScript* GetAuraScript() const override { return new spell_pri_t3_4p_bonus_AuraScript(); } }; // 37594 - Greater Heal Refund class spell_pri_t5_heal_2p_bonus : public SpellScriptLoader { public: spell_pri_t5_heal_2p_bonus() : SpellScriptLoader("spell_pri_t5_heal_2p_bonus") { } class spell_pri_t5_heal_2p_bonus_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_t5_heal_2p_bonus_AuraScript); bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_ITEM_EFFICIENCY }); } bool CheckProc(ProcEventInfo& eventInfo) { if (HealInfo* healInfo = eventInfo.GetHealInfo()) if (Unit* healTarget = healInfo->GetTarget()) if (healInfo->GetEffectiveHeal()) if (healTarget->GetHealth() >= healTarget->GetMaxHealth()) return true; return false; } void HandleProc(AuraEffect const* aurEff, ProcEventInfo& /*eventInfo*/) { PreventDefaultAction(); GetTarget()->CastSpell(GetTarget(), SPELL_PRIEST_ITEM_EFFICIENCY, true, nullptr, aurEff); } void Register() override { DoCheckProc += AuraCheckProcFn(spell_pri_t5_heal_2p_bonus_AuraScript::CheckProc); OnEffectProc += AuraEffectProcFn(spell_pri_t5_heal_2p_bonus_AuraScript::HandleProc, EFFECT_0, SPELL_AURA_PROC_TRIGGER_SPELL); } }; AuraScript* GetAuraScript() const override { return new spell_pri_t5_heal_2p_bonus_AuraScript(); } }; // 70770 - Item - Priest T10 Healer 2P Bonus class 
spell_pri_t10_heal_2p_bonus : public SpellScriptLoader { public: spell_pri_t10_heal_2p_bonus() : SpellScriptLoader("spell_pri_t10_heal_2p_bonus") { } class spell_pri_t10_heal_2p_bonus_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_t10_heal_2p_bonus_AuraScript); bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_BLESSED_HEALING }); } void HandleProc(AuraEffect const* aurEff, ProcEventInfo& eventInfo) { PreventDefaultAction(); HealInfo* healInfo = eventInfo.GetHealInfo(); if (!healInfo || !healInfo->GetHeal()) return; SpellInfo const* spellInfo = sSpellMgr->AssertSpellInfo(SPELL_PRIEST_BLESSED_HEALING); int32 amount = CalculatePct(static_cast<int32>(healInfo->GetHeal()), aurEff->GetAmount()); amount /= spellInfo->GetMaxTicks(DIFFICULTY_NONE); // Add remaining ticks to healing done Unit* caster = eventInfo.GetActor(); Unit* target = eventInfo.GetProcTarget(); amount += target->GetRemainingPeriodicAmount(caster->GetGUID(), SPELL_PRIEST_BLESSED_HEALING, SPELL_AURA_PERIODIC_HEAL); caster->CastCustomSpell(SPELL_PRIEST_BLESSED_HEALING, SPELLVALUE_BASE_POINT0, amount, target, true, nullptr, aurEff); } void Register() override { OnEffectProc += AuraEffectProcFn(spell_pri_t10_heal_2p_bonus_AuraScript::HandleProc, EFFECT_0, SPELL_AURA_DUMMY); } }; AuraScript* GetAuraScript() const override { return new spell_pri_t10_heal_2p_bonus_AuraScript(); } }; // 15286 - Vampiric Embrace class spell_pri_vampiric_embrace : public SpellScriptLoader { public: spell_pri_vampiric_embrace() : SpellScriptLoader("spell_pri_vampiric_embrace") { } class spell_pri_vampiric_embrace_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_vampiric_embrace_AuraScript); bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_VAMPIRIC_EMBRACE_HEAL }); } bool CheckProc(ProcEventInfo& eventInfo) { // Not proc from Mind Sear return !(eventInfo.GetDamageInfo()->GetSpellInfo()->SpellFamilyFlags[1] & 
0x80000); } void HandleEffectProc(AuraEffect const* aurEff, ProcEventInfo& eventInfo) { PreventDefaultAction(); DamageInfo* damageInfo = eventInfo.GetDamageInfo(); if (!damageInfo || !damageInfo->GetDamage()) return; int32 selfHeal = int32(CalculatePct(damageInfo->GetDamage(), aurEff->GetAmount())); int32 teamHeal = selfHeal / 2; GetTarget()->CastCustomSpell((Unit*)NULL, SPELL_PRIEST_VAMPIRIC_EMBRACE_HEAL, &teamHeal, &selfHeal, NULL, true, NULL, aurEff); } void Register() override { DoCheckProc += AuraCheckProcFn(spell_pri_vampiric_embrace_AuraScript::CheckProc); OnEffectProc += AuraEffectProcFn(spell_pri_vampiric_embrace_AuraScript::HandleEffectProc, EFFECT_0, SPELL_AURA_DUMMY); } }; AuraScript* GetAuraScript() const override { return new spell_pri_vampiric_embrace_AuraScript(); } }; // 15290 - Vampiric Embrace (heal) class spell_pri_vampiric_embrace_target : public SpellScriptLoader { public: spell_pri_vampiric_embrace_target() : SpellScriptLoader("spell_pri_vampiric_embrace_target") { } class spell_pri_vampiric_embrace_target_SpellScript : public SpellScript { PrepareSpellScript(spell_pri_vampiric_embrace_target_SpellScript); void FilterTargets(std::list<WorldObject*>& unitList) { unitList.remove(GetCaster()); } void Register() override { OnObjectAreaTargetSelect += SpellObjectAreaTargetSelectFn(spell_pri_vampiric_embrace_target_SpellScript::FilterTargets, EFFECT_0, TARGET_UNIT_CASTER_AREA_PARTY); } }; SpellScript* GetSpellScript() const override { return new spell_pri_vampiric_embrace_target_SpellScript(); } }; // 34914 - Vampiric Touch class spell_pri_vampiric_touch : public SpellScriptLoader { public: spell_pri_vampiric_touch() : SpellScriptLoader("spell_pri_vampiric_touch") { } class spell_pri_vampiric_touch_AuraScript : public AuraScript { PrepareAuraScript(spell_pri_vampiric_touch_AuraScript); bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_VAMPIRIC_TOUCH_DISPEL, SPELL_GEN_REPLENISHMENT }); } void 
HandleDispel(DispelInfo* /*dispelInfo*/) { if (Unit* caster = GetCaster()) { if (Unit* target = GetUnitOwner()) { if (AuraEffect const* aurEff = GetEffect(EFFECT_1)) { int32 damage = aurEff->GetAmount() * 8; // backfire damage caster->CastCustomSpell(target, SPELL_PRIEST_VAMPIRIC_TOUCH_DISPEL, &damage, NULL, NULL, true, NULL, aurEff); } } } } bool CheckProc(ProcEventInfo& eventInfo) { return eventInfo.GetProcTarget() == GetCaster(); } void HandleEffectProc(AuraEffect const* aurEff, ProcEventInfo& eventInfo) { PreventDefaultAction(); eventInfo.GetProcTarget()->CastSpell((Unit*)NULL, SPELL_GEN_REPLENISHMENT, true, NULL, aurEff); } void Register() override { AfterDispel += AuraDispelFn(spell_pri_vampiric_touch_AuraScript::HandleDispel); DoCheckProc += AuraCheckProcFn(spell_pri_vampiric_touch_AuraScript::CheckProc); OnEffectProc += AuraEffectProcFn(spell_pri_vampiric_touch_AuraScript::HandleEffectProc, EFFECT_2, SPELL_AURA_DUMMY); } }; AuraScript* GetAuraScript() const override { return new spell_pri_vampiric_touch_AuraScript(); } }; // 121536 - Angelic Feather talent class spell_pri_angelic_feather_trigger : public SpellScriptLoader { public: spell_pri_angelic_feather_trigger() : SpellScriptLoader("spell_pri_angelic_feather_trigger") { } class spell_pri_angelic_feather_trigger_SpellScript : public SpellScript { PrepareSpellScript(spell_pri_angelic_feather_trigger_SpellScript); bool Validate(SpellInfo const* /*spellInfo*/) override { return ValidateSpellInfo({ SPELL_PRIEST_ANGELIC_FEATHER_AREATRIGGER }); } void HandleEffectDummy(SpellEffIndex /*effIndex*/) { Position destPos = GetHitDest()->GetPosition(); float radius = GetEffectInfo()->CalcRadius(); // Caster is prioritary if (GetCaster()->IsWithinDist2d(&destPos, radius)) { GetCaster()->CastSpell(GetCaster(), SPELL_PRIEST_ANGELIC_FEATHER_AURA, true); } else { SpellCastTargets targets; targets.SetDst(destPos); GetCaster()->CastSpell(targets, sSpellMgr->GetSpellInfo(SPELL_PRIEST_ANGELIC_FEATHER_AREATRIGGER), nullptr); 
} } void Register() override { OnEffectHit += SpellEffectFn(spell_pri_angelic_feather_trigger_SpellScript::HandleEffectDummy, EFFECT_0, SPELL_EFFECT_DUMMY); } }; SpellScript* GetSpellScript() const override { return new spell_pri_angelic_feather_trigger_SpellScript(); } }; // Angelic Feather areatrigger - created by SPELL_PRIEST_ANGELIC_FEATHER_AREATRIGGER class areatrigger_pri_angelic_feather : public AreaTriggerEntityScript { public: areatrigger_pri_angelic_feather() : AreaTriggerEntityScript("areatrigger_pri_angelic_feather") { } struct areatrigger_pri_angelic_featherAI : AreaTriggerAI { areatrigger_pri_angelic_featherAI(AreaTrigger* areatrigger) : AreaTriggerAI(areatrigger) { } // Called when the AreaTrigger has just been initialized, just before added to map void OnInitialize() override { if (Unit* caster = at->GetCaster()) { std::vector<AreaTrigger*> areaTriggers = caster->GetAreaTriggers(SPELL_PRIEST_ANGELIC_FEATHER_AREATRIGGER); if (areaTriggers.size() >= 3) areaTriggers.front()->SetDuration(0); } } void OnUnitEnter(Unit* unit) override { if (Unit* caster = at->GetCaster()) { if (caster->IsFriendlyTo(unit)) { // If target already has aura, increase duration to max 130% of initial duration caster->CastSpell(unit, SPELL_PRIEST_ANGELIC_FEATHER_AURA, true); at->SetDuration(0); } } } }; AreaTriggerAI* GetAI(AreaTrigger* areatrigger) const override { return new areatrigger_pri_angelic_featherAI(areatrigger); } }; void AddSC_priest_spell_scripts() { new spell_pri_aq_3p_bonus(); new spell_pri_body_and_soul(); new spell_pri_atonement(); new spell_pri_atonement_triggered(); new spell_pri_circle_of_healing(); new spell_pri_dispel_magic(); new spell_pri_divine_aegis(); new spell_pri_divine_hymn(); new spell_pri_glyph_of_prayer_of_healing(); new spell_pri_hymn_of_hope(); new spell_pri_improved_power_word_shield(); new spell_pri_guardian_spirit(); new spell_pri_item_t6_trinket(); new spell_pri_leap_of_faith_effect_trigger(); new spell_pri_levitate(); new 
spell_pri_lightwell_renew(); new spell_pri_mana_burn(); new spell_pri_mana_leech(); new spell_pri_pain_and_suffering_proc(); new spell_pri_penance(); new spell_pri_phantasm(); new spell_pri_power_word_shield(); new spell_pri_prayer_of_mending_heal(); new spell_pri_shadowform(); RegisterAuraScript(spell_priest_spirit_of_redemption); new spell_pri_t3_4p_bonus(); new spell_pri_t5_heal_2p_bonus(); new spell_pri_t10_heal_2p_bonus(); new spell_pri_vampiric_embrace(); new spell_pri_vampiric_embrace_target(); new spell_pri_vampiric_touch(); new spell_pri_angelic_feather_trigger(); new areatrigger_pri_angelic_feather(); }
jameyboor/TrinityCore
src/server/scripts/Spells/spell_priest.cpp
C++
gpl-2.0
60,625
/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 1.1/GPL 2.0/LGPL 2.1 * * The contents of this file are subject to the Mozilla Public License Version * 1.1 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is Private Browsing Tests. * * The Initial Developer of the Original Code is * Ehsan Akhgari. * Portions created by the Initial Developer are Copyright (C) 2008 * the Initial Developer. All Rights Reserved. * * Contributor(s): * Ehsan Akhgari <ehsan.akhgari@gmail.com> (Original Author) * * Alternatively, the contents of this file may be used under the terms of * either of the GNU General Public License Version 2 or later (the "GPL"), * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), * in which case the provisions of the GPL or the LGPL are applicable instead * of those above. If you wish to allow use of your version of this file only * under the terms of either the GPL or the LGPL, and not to allow others to * use your version of this file under the terms of the MPL, indicate your * decision by deleting the provisions above and replace them with the notice * and other provisions required by the GPL or the LGPL. If you do not delete * the provisions above, a recipient may use your version of this file under * the terms of any one of the MPL, the GPL or the LGPL. * * ***** END LICENSE BLOCK ***** */ // This tests the private browsing service to make sure it implements its // documented interface correctly. // This test should run before the rest of private browsing service unit tests, // hence the naming used for this file. 
function run_test_on_service() { // initialization var os = Cc["@mozilla.org/observer-service;1"]. getService(Ci.nsIObserverService); // the contract ID should be available do_check_true(PRIVATEBROWSING_CONTRACT_ID in Cc); // the interface should be available do_check_true("nsIPrivateBrowsingService" in Ci); // it should be possible to initialize the component try { var pb = Cc[PRIVATEBROWSING_CONTRACT_ID]. getService(Ci.nsIPrivateBrowsingService); } catch (ex) { LOG("exception thrown when trying to get the service: " + ex); do_throw("private browsing service could not be initialized"); } // private browsing should be turned off initially do_check_false(pb.privateBrowsingEnabled); // private browsing not auto-started do_check_false(pb.autoStarted); // it should be possible to toggle its status pb.privateBrowsingEnabled = true; do_check_true(pb.privateBrowsingEnabled); do_check_false(pb.autoStarted); pb.privateBrowsingEnabled = false; do_check_false(pb.privateBrowsingEnabled); do_check_false(pb.autoStarted); // test the private-browsing notification var observer = { observe: function(aSubject, aTopic, aData) { if (aTopic == kPrivateBrowsingNotification) this.data = aData; }, data: null }; os.addObserver(observer, kPrivateBrowsingNotification, false); pb.privateBrowsingEnabled = true; do_check_eq(observer.data, kEnter); pb.privateBrowsingEnabled = false; do_check_eq(observer.data, kExit); os.removeObserver(observer, kPrivateBrowsingNotification); // make sure that setting the private browsing mode from within an observer throws observer = { observe: function(aSubject, aTopic, aData) { if (aTopic == kPrivateBrowsingNotification) { try { pb.privateBrowsingEnabled = (aData == kEnter); do_throw("Setting privateBrowsingEnabled inside the " + aData + " notification should throw"); } catch (ex) { if (!("result" in ex && ex.result == Cr.NS_ERROR_FAILURE)) do_throw("Unexpected exception caught: " + ex); } } } }; os.addObserver(observer, kPrivateBrowsingNotification, false); 
pb.privateBrowsingEnabled = true; do_check_true(pb.privateBrowsingEnabled); // the exception should not interfere with the mode change pb.privateBrowsingEnabled = false; do_check_false(pb.privateBrowsingEnabled); // the exception should not interfere with the mode change os.removeObserver(observer, kPrivateBrowsingNotification); // make sure that getting the private browsing mode from within an observer doesn't throw observer = { observe: function(aSubject, aTopic, aData) { if (aTopic == kPrivateBrowsingNotification) { try { var dummy = pb.privateBrowsingEnabled; if (aData == kEnter) do_check_true(dummy); else if (aData == kExit) do_check_false(dummy); } catch (ex) { do_throw("Unexpected exception caught: " + ex); } } } }; os.addObserver(observer, kPrivateBrowsingNotification, false); pb.privateBrowsingEnabled = true; do_check_true(pb.privateBrowsingEnabled); // just a sanity check pb.privateBrowsingEnabled = false; do_check_false(pb.privateBrowsingEnabled); // just a sanity check os.removeObserver(observer, kPrivateBrowsingNotification); // check that the private-browsing-cancel-vote notification is sent before the // private-browsing notification observer = { observe: function(aSubject, aTopic, aData) { switch (aTopic) { case kPrivateBrowsingCancelVoteNotification: case kPrivateBrowsingNotification: this.notifications.push(aTopic + " " + aData); } }, notifications: [] }; os.addObserver(observer, kPrivateBrowsingCancelVoteNotification, false); os.addObserver(observer, kPrivateBrowsingNotification, false); pb.privateBrowsingEnabled = true; do_check_true(pb.privateBrowsingEnabled); // just a sanity check pb.privateBrowsingEnabled = false; do_check_false(pb.privateBrowsingEnabled); // just a sanity check os.removeObserver(observer, kPrivateBrowsingNotification); os.removeObserver(observer, kPrivateBrowsingCancelVoteNotification); var reference_order = [ kPrivateBrowsingCancelVoteNotification + " " + kEnter, kPrivateBrowsingNotification + " " + kEnter, 
kPrivateBrowsingCancelVoteNotification + " " + kExit, kPrivateBrowsingNotification + " " + kExit ]; do_check_eq(observer.notifications.join(","), reference_order.join(",")); // make sure that the private-browsing-cancel-vote notification can be used // to cancel the mode switch observer = { observe: function(aSubject, aTopic, aData) { switch (aTopic) { case kPrivateBrowsingCancelVoteNotification: do_check_neq(aSubject, null); try { aSubject.QueryInterface(Ci.nsISupportsPRBool); } catch (ex) { do_throw("aSubject in " + kPrivateBrowsingCancelVoteNotification + " should implement nsISupportsPRBool"); } do_check_false(aSubject.data); aSubject.data = true; // cancel the mode switch // fall through case kPrivateBrowsingNotification: this.notifications.push(aTopic + " " + aData); } }, nextPhase: function() { this.notifications.push("enter phase " + (++this._phase)); }, notifications: [], _phase: 0 }; os.addObserver(observer, kPrivateBrowsingCancelVoteNotification, false); os.addObserver(observer, kPrivateBrowsingNotification, false); pb.privateBrowsingEnabled = true; do_check_false(pb.privateBrowsingEnabled); // should have been canceled // temporarily disable the observer os.removeObserver(observer, kPrivateBrowsingCancelVoteNotification); observer.nextPhase(); pb.privateBrowsingEnabled = true; // this time, should enter successfully do_check_true(pb.privateBrowsingEnabled); // should have been canceled // re-enable the observer os.addObserver(observer, kPrivateBrowsingCancelVoteNotification, false); pb.privateBrowsingEnabled = false; do_check_true(pb.privateBrowsingEnabled); // should have been canceled os.removeObserver(observer, kPrivateBrowsingCancelVoteNotification); observer.nextPhase(); pb.privateBrowsingEnabled = false; // this time, should exit successfully do_check_false(pb.privateBrowsingEnabled); os.removeObserver(observer, kPrivateBrowsingNotification); reference_order = [ kPrivateBrowsingCancelVoteNotification + " " + kEnter, "enter phase 1", 
kPrivateBrowsingNotification + " " + kEnter, kPrivateBrowsingCancelVoteNotification + " " + kExit, "enter phase 2", kPrivateBrowsingNotification + " " + kExit, ]; do_check_eq(observer.notifications.join(","), reference_order.join(",")); // make sure that the private browsing transition complete notification is // raised correctly. observer = { observe: function(aSubject, aTopic, aData) { this.notifications.push(aTopic + " " + aData); }, notifications: [] }; os.addObserver(observer, kPrivateBrowsingNotification, false); os.addObserver(observer, kPrivateBrowsingTransitionCompleteNotification, false); pb.privateBrowsingEnabled = true; pb.privateBrowsingEnabled = false; os.removeObserver(observer, kPrivateBrowsingNotification); os.removeObserver(observer, kPrivateBrowsingTransitionCompleteNotification); reference_order = [ kPrivateBrowsingNotification + " " + kEnter, kPrivateBrowsingTransitionCompleteNotification + " ", kPrivateBrowsingNotification + " " + kExit, kPrivateBrowsingTransitionCompleteNotification + " ", ]; do_check_eq(observer.notifications.join(","), reference_order.join(",")); } // Support running tests on both the service itself and its wrapper function run_test() { run_test_on_all_services(); }
freaktechnik/nightingale-hacking
dependencies/vendor/mozbrowser/components/privatebrowsing/test/unit/test_0-privatebrowsing.js
JavaScript
gpl-2.0
9,952
/* * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. * */ /* * @test * @summary Redefine shared class. GC should not cause crash with cached resolved_references. 
* @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds /test/hotspot/jtreg/runtime/cds/appcds/test-classes /test/hotspot/jtreg/runtime/cds/appcds/jvmti * @requires vm.cds.archived.java.heap * @build sun.hotspot.WhiteBox * RedefineClassApp * InstrumentationClassFileTransformer * InstrumentationRegisterClassFileTransformer * @run driver RedefineClassTest */ import com.sun.tools.attach.VirtualMachine; import com.sun.tools.attach.VirtualMachineDescriptor; import java.io.File; import java.util.List; import jdk.test.lib.Asserts; import jdk.test.lib.cds.CDSOptions; import jdk.test.lib.process.OutputAnalyzer; import jdk.test.lib.process.ProcessTools; public class RedefineClassTest { public static String bootClasses[] = { "RedefineClassApp$Intf", "RedefineClassApp$Bar", "sun.hotspot.WhiteBox", }; public static String appClasses[] = { "RedefineClassApp", "RedefineClassApp$Foo", }; public static String sharedClasses[] = TestCommon.concat(bootClasses, appClasses); public static String agentClasses[] = { "InstrumentationClassFileTransformer", "InstrumentationRegisterClassFileTransformer", "Util", }; public static void main(String[] args) throws Throwable { runTest(); } public static void runTest() throws Throwable { String bootJar = ClassFileInstaller.writeJar("RedefineClassBoot.jar", bootClasses); String appJar = ClassFileInstaller.writeJar("RedefineClassApp.jar", appClasses); String agentJar = ClassFileInstaller.writeJar("InstrumentationAgent.jar", ClassFileInstaller.Manifest.fromSourceFile("InstrumentationAgent.mf"), agentClasses); String bootCP = "-Xbootclasspath/a:" + bootJar; String agentCmdArg; agentCmdArg = "-javaagent:" + agentJar; TestCommon.testDump(appJar, sharedClasses, bootCP, "-Xlog:gc+region=trace"); OutputAnalyzer out = TestCommon.execAuto("-cp", appJar, bootCP, "-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI", "-Xlog:cds=info", agentCmdArg, "RedefineClassApp", bootJar, appJar); out.reportDiagnosticSummary(); CDSOptions opts = (new 
CDSOptions()).setXShareMode("auto"); TestCommon.checkExec(out, opts); } }
md-5/jdk10
test/hotspot/jtreg/runtime/cds/appcds/cacheObject/RedefineClassTest.java
Java
gpl-2.0
3,675
/* * Copyright (C) 2007 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/kernel.h> #include <linux/bio.h> #include <linux/buffer_head.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/fsnotify.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/time.h> #include <linux/init.h> #include <linux/string.h> #include <linux/backing-dev.h> #include <linux/mount.h> #include <linux/mpage.h> #include <linux/namei.h> #include <linux/swap.h> #include <linux/writeback.h> #include <linux/compat.h> #include <linux/bit_spinlock.h> #include <linux/security.h> #include <linux/xattr.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/uuid.h> #include <linux/btrfs.h> #include <linux/uaccess.h> #include "ctree.h" #include "disk-io.h" #include "transaction.h" #include "btrfs_inode.h" #include "print-tree.h" #include "volumes.h" #include "locking.h" #include "inode-map.h" #include "backref.h" #include "rcu-string.h" #include "send.h" #include "dev-replace.h" #include "props.h" #include "sysfs.h" #include "qgroup.h" #include "tree-log.h" #include "compression.h" #ifdef CONFIG_64BIT /* If we have a 32-bit userspace and 64-bit kernel, then the UAPI * structures are incorrect, as the timespec structure from userspace * is 4 bytes too small. 
We define these alternatives here to teach
 * the kernel about the 32-bit struct packing.
 */
struct btrfs_ioctl_timespec_32 {
	__u64 sec;
	__u32 nsec;
} __attribute__ ((__packed__));

struct btrfs_ioctl_received_subvol_args_32 {
	char	uuid[BTRFS_UUID_SIZE];	/* in */
	__u64	stransid;		/* in */
	__u64	rtransid;		/* out */
	struct btrfs_ioctl_timespec_32 stime; /* in */
	struct btrfs_ioctl_timespec_32 rtime; /* out */
	__u64	flags;			/* in */
	__u64	reserved[16];		/* in */
} __attribute__ ((__packed__));

/* 32-bit compat variant of the SET_RECEIVED_SUBVOL ioctl (nr 37). */
#define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \
				struct btrfs_ioctl_received_subvol_args_32)
#endif

/* Forward declaration; the definition lives later in this file. */
static int btrfs_clone(struct inode *src, struct inode *inode, u64 off,
		       u64 olen, u64 olen_aligned, u64 destoff,
		       int no_time_update);

/* Mask out flags that are inappropriate for the given type of inode. */
static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;			/* directories keep every flag */
	else if (S_ISREG(mode))
		return flags & ~FS_DIRSYNC_FL;	/* DIRSYNC is directory-only */
	else
		/* other inode types: only NODUMP and NOATIME apply */
		return flags & (FS_NODUMP_FL | FS_NOATIME_FL);
}

/*
 * Export inode flags to the format expected by the FS_IOC_GETFLAGS ioctl.
 */
static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
{
	unsigned int iflags = 0;

	if (flags & BTRFS_INODE_SYNC)
		iflags |= FS_SYNC_FL;
	if (flags & BTRFS_INODE_IMMUTABLE)
		iflags |= FS_IMMUTABLE_FL;
	if (flags & BTRFS_INODE_APPEND)
		iflags |= FS_APPEND_FL;
	if (flags & BTRFS_INODE_NODUMP)
		iflags |= FS_NODUMP_FL;
	if (flags & BTRFS_INODE_NOATIME)
		iflags |= FS_NOATIME_FL;
	if (flags & BTRFS_INODE_DIRSYNC)
		iflags |= FS_DIRSYNC_FL;
	if (flags & BTRFS_INODE_NODATACOW)
		iflags |= FS_NOCOW_FL;

	/* COMPRESS and NOCOMPRESS are mutually exclusive in the output */
	if (flags & BTRFS_INODE_NOCOMPRESS)
		iflags |= FS_NOCOMP_FL;
	else if (flags & BTRFS_INODE_COMPRESS)
		iflags |= FS_COMPR_FL;

	return iflags;
}

/*
 * Update inode->i_flags based on the btrfs internal flags.
 */
void btrfs_update_iflags(struct inode *inode)
{
	struct btrfs_inode *ip = BTRFS_I(inode);
	unsigned int new_fl = 0;

	if (ip->flags & BTRFS_INODE_SYNC)
		new_fl |= S_SYNC;
	if (ip->flags & BTRFS_INODE_IMMUTABLE)
		new_fl |= S_IMMUTABLE;
	if (ip->flags & BTRFS_INODE_APPEND)
		new_fl |= S_APPEND;
	if (ip->flags & BTRFS_INODE_NOATIME)
		new_fl |= S_NOATIME;
	if (ip->flags & BTRFS_INODE_DIRSYNC)
		new_fl |= S_DIRSYNC;

	/* atomically replace exactly these five bits of i_flags */
	set_mask_bits(&inode->i_flags,
		      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC,
		      new_fl);
}

/* FS_IOC_GETFLAGS: copy the inode flags to userspace in ioctl format. */
static int btrfs_ioctl_getflags(struct file *file, void __user *arg)
{
	struct btrfs_inode *ip = BTRFS_I(file_inode(file));
	unsigned int flags = btrfs_flags_to_ioctl(ip->flags);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		return -EFAULT;
	return 0;
}

/* Reject flag combinations btrfs does not support. */
static int check_flags(unsigned int flags)
{
	if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
		      FS_NOATIME_FL | FS_NODUMP_FL | \
		      FS_SYNC_FL | FS_DIRSYNC_FL | \
		      FS_NOCOMP_FL | FS_COMPR_FL |
		      FS_NOCOW_FL))
		return -EOPNOTSUPP;

	/* compression cannot be both forced on and forced off */
	if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
		return -EINVAL;

	return 0;
}

/* FS_IOC_SETFLAGS: validate and apply a new set of inode flags. */
static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_inode *ip = BTRFS_I(inode);
	struct btrfs_root *root = ip->root;
	struct btrfs_trans_handle *trans;
	unsigned int flags, oldflags;
	int ret;
	u64 ip_oldflags;
	unsigned int i_oldflags;
	umode_t mode;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (copy_from_user(&flags, arg, sizeof(flags)))
		return -EFAULT;

	ret = check_flags(flags);
	if (ret)
		return ret;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	inode_lock(inode);

	/* remember the old state so a failure below can roll back */
	ip_oldflags = ip->flags;
	i_oldflags = inode->i_flags;
	mode = inode->i_mode;

	flags = btrfs_mask_flags(inode->i_mode, flags);
	oldflags = btrfs_flags_to_ioctl(ip->flags);
	/* toggling APPEND or IMMUTABLE requires CAP_LINUX_IMMUTABLE */
	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			ret = -EPERM;
			goto
out_unlock;
		}
	}

	/* translate each accepted ioctl flag into the btrfs inode flag */
	if (flags & FS_SYNC_FL)
		ip->flags |= BTRFS_INODE_SYNC;
	else
		ip->flags &= ~BTRFS_INODE_SYNC;
	if (flags & FS_IMMUTABLE_FL)
		ip->flags |= BTRFS_INODE_IMMUTABLE;
	else
		ip->flags &= ~BTRFS_INODE_IMMUTABLE;
	if (flags & FS_APPEND_FL)
		ip->flags |= BTRFS_INODE_APPEND;
	else
		ip->flags &= ~BTRFS_INODE_APPEND;
	if (flags & FS_NODUMP_FL)
		ip->flags |= BTRFS_INODE_NODUMP;
	else
		ip->flags &= ~BTRFS_INODE_NODUMP;
	if (flags & FS_NOATIME_FL)
		ip->flags |= BTRFS_INODE_NOATIME;
	else
		ip->flags &= ~BTRFS_INODE_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		ip->flags |= BTRFS_INODE_DIRSYNC;
	else
		ip->flags &= ~BTRFS_INODE_DIRSYNC;
	if (flags & FS_NOCOW_FL) {
		if (S_ISREG(mode)) {
			/*
			 * It's safe to turn csums off here, no extents exist.
			 * Otherwise we want the flag to reflect the real COW
			 * status of the file and will not set it.
			 */
			if (inode->i_size == 0)
				ip->flags |= BTRFS_INODE_NODATACOW
					   | BTRFS_INODE_NODATASUM;
		} else {
			ip->flags |= BTRFS_INODE_NODATACOW;
		}
	} else {
		/*
		 * Revert back under same assumptions as above
		 */
		if (S_ISREG(mode)) {
			if (inode->i_size == 0)
				ip->flags &= ~(BTRFS_INODE_NODATACOW
					     | BTRFS_INODE_NODATASUM);
		} else {
			ip->flags &= ~BTRFS_INODE_NODATACOW;
		}
	}

	/*
	 * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
	 * flag may be changed automatically if compression code won't make
	 * things smaller.
 */
	if (flags & FS_NOCOMP_FL) {
		ip->flags &= ~BTRFS_INODE_COMPRESS;
		ip->flags |= BTRFS_INODE_NOCOMPRESS;

		/* -ENODATA just means no property was set; that's fine */
		ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
		if (ret && ret != -ENODATA)
			goto out_drop;
	} else if (flags & FS_COMPR_FL) {
		const char *comp;

		ip->flags |= BTRFS_INODE_COMPRESS;
		ip->flags &= ~BTRFS_INODE_NOCOMPRESS;

		/* record the mount's compression algorithm as a property */
		if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
			comp = "lzo";
		else if (fs_info->compress_type == BTRFS_COMPRESS_ZLIB)
			comp = "zlib";
		else
			comp = "zstd";
		ret = btrfs_set_prop(inode, "btrfs.compression",
				     comp, strlen(comp), 0);
		if (ret)
			goto out_drop;
	} else {
		ret = btrfs_set_prop(inode, "btrfs.compression", NULL, 0, 0);
		if (ret && ret != -ENODATA)
			goto out_drop;
		ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
	}

	/* one unit reserved: the inode item update below */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_drop;
	}

	btrfs_update_iflags(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_time(inode);
	ret = btrfs_update_inode(trans, root, inode);

	btrfs_end_transaction(trans);
out_drop:
	if (ret) {
		/* restore the flags captured before any modification */
		ip->flags = ip_oldflags;
		inode->i_flags = i_oldflags;
	}

out_unlock:
	inode_unlock(inode);
	mnt_drop_write_file(file);
	return ret;
}

/* FS_IOC_GETVERSION: report the inode generation to userspace. */
static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
{
	struct inode *inode = file_inode(file);

	return put_user(inode->i_generation, arg);
}

/* FITRIM: discard unused space on all devices that support discard. */
static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_device *device;
	struct request_queue *q;
	struct fstrim_range range;
	u64 minlen = ULLONG_MAX;
	u64 num_devices = 0;
	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* find the smallest discard granularity among capable devices */
	rcu_read_lock();
	list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
				dev_list) {
		if (!device->bdev)
			continue;
		q = bdev_get_queue(device->bdev);
		if (blk_queue_discard(q)) {
			num_devices++;
			minlen = min_t(u64,
q->limits.discard_granularity, minlen);
		}
	}
	rcu_read_unlock();

	if (!num_devices)
		return -EOPNOTSUPP;
	if (copy_from_user(&range, arg, sizeof(range)))
		return -EFAULT;
	/* reject a start past the fs end or a length below one block */
	if (range.start > total_bytes ||
	    range.len < fs_info->sb->s_blocksize)
		return -EINVAL;

	/* clamp the request to the filesystem and device granularity */
	range.len = min(range.len, total_bytes - range.start);
	range.minlen = max(range.minlen, minlen);
	ret = btrfs_trim_fs(fs_info, &range);
	if (ret < 0)
		return ret;

	/* copy the (updated) range back so userspace sees bytes trimmed */
	if (copy_to_user(arg, &range, sizeof(range)))
		return -EFAULT;

	return 0;
}

/* Return 1 if the BTRFS_UUID_SIZE-byte UUID is all zeroes, else 0. */
int btrfs_is_empty_uuid(u8 *uuid)
{
	int i;

	for (i = 0; i < BTRFS_UUID_SIZE; i++) {
		if (uuid[i])
			return 0;
	}
	return 1;
}

/*
 * Create a new subvolume named @name under directory @dir, instantiating
 * @dentry on success.  A non-NULL @async_transid requests an async commit
 * and receives the transaction id.
 */
static noinline int create_subvol(struct inode *dir,
				  struct dentry *dentry,
				  const char *name, int namelen,
				  u64 *async_transid,
				  struct btrfs_qgroup_inherit *inherit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_root_item *root_item;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *new_root;
	struct btrfs_block_rsv block_rsv;
	struct timespec cur_time = current_time(dir);
	struct inode *inode;
	int ret;
	int err;
	u64 objectid;
	u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
	u64 index = 0;
	u64 qgroup_reserved;
	uuid_le new_uuid;

	root_item = kzalloc(sizeof(*root_item), GFP_KERNEL);
	if (!root_item)
		return -ENOMEM;

	ret = btrfs_find_free_objectid(fs_info->tree_root, &objectid);
	if (ret)
		goto fail_free;

	/*
	 * Don't create subvolume whose level is not zero. Or qgroup will be
	 * screwed up since it assumes subvolume qgroup's level to be 0.
	 */
	if (btrfs_qgroup_level(objectid)) {
		ret = -ENOSPC;
		goto fail_free;
	}

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * The same as the snapshot creation, please see the comment
	 * of create_snapshot().
*/ ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 8, &qgroup_reserved, false); if (ret) goto fail_free; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); btrfs_subvolume_release_metadata(fs_info, &block_rsv); goto fail_free; } trans->block_rsv = &block_rsv; trans->bytes_reserved = block_rsv.size; ret = btrfs_qgroup_inherit(trans, fs_info, 0, objectid, inherit); if (ret) goto fail; leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0); if (IS_ERR(leaf)) { ret = PTR_ERR(leaf); goto fail; } memzero_extent_buffer(leaf, 0, sizeof(struct btrfs_header)); btrfs_set_header_bytenr(leaf, leaf->start); btrfs_set_header_generation(leaf, trans->transid); btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV); btrfs_set_header_owner(leaf, objectid); write_extent_buffer_fsid(leaf, fs_info->fsid); write_extent_buffer_chunk_tree_uuid(leaf, fs_info->chunk_tree_uuid); btrfs_mark_buffer_dirty(leaf); inode_item = &root_item->inode; btrfs_set_stack_inode_generation(inode_item, 1); btrfs_set_stack_inode_size(inode_item, 3); btrfs_set_stack_inode_nlink(inode_item, 1); btrfs_set_stack_inode_nbytes(inode_item, fs_info->nodesize); btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755); btrfs_set_root_flags(root_item, 0); btrfs_set_root_limit(root_item, 0); btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT); btrfs_set_root_bytenr(root_item, leaf->start); btrfs_set_root_generation(root_item, trans->transid); btrfs_set_root_level(root_item, 0); btrfs_set_root_refs(root_item, 1); btrfs_set_root_used(root_item, leaf->len); btrfs_set_root_last_snapshot(root_item, 0); btrfs_set_root_generation_v2(root_item, btrfs_root_generation(root_item)); uuid_le_gen(&new_uuid); memcpy(root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE); btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec); btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec); root_item->ctime = root_item->otime; 
btrfs_set_root_ctransid(root_item, trans->transid); btrfs_set_root_otransid(root_item, trans->transid); btrfs_tree_unlock(leaf); free_extent_buffer(leaf); leaf = NULL; btrfs_set_root_dirid(root_item, new_dirid); key.objectid = objectid; key.offset = 0; key.type = BTRFS_ROOT_ITEM_KEY; ret = btrfs_insert_root(trans, fs_info->tree_root, &key, root_item); if (ret) goto fail; key.offset = (u64)-1; new_root = btrfs_read_fs_root_no_name(fs_info, &key); if (IS_ERR(new_root)) { ret = PTR_ERR(new_root); btrfs_abort_transaction(trans, ret); goto fail; } btrfs_record_root_in_trans(trans, new_root); ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid); if (ret) { /* We potentially lose an unused inode item here */ btrfs_abort_transaction(trans, ret); goto fail; } mutex_lock(&new_root->objectid_mutex); new_root->highest_objectid = new_dirid; mutex_unlock(&new_root->objectid_mutex); /* * insert the directory item */ ret = btrfs_set_inode_index(BTRFS_I(dir), &index); if (ret) { btrfs_abort_transaction(trans, ret); goto fail; } ret = btrfs_insert_dir_item(trans, root, name, namelen, BTRFS_I(dir), &key, BTRFS_FT_DIR, index); if (ret) { btrfs_abort_transaction(trans, ret); goto fail; } btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2); ret = btrfs_update_inode(trans, root, dir); BUG_ON(ret); ret = btrfs_add_root_ref(trans, fs_info, objectid, root->root_key.objectid, btrfs_ino(BTRFS_I(dir)), index, name, namelen); BUG_ON(ret); ret = btrfs_uuid_tree_add(trans, fs_info, root_item->uuid, BTRFS_UUID_KEY_SUBVOL, objectid); if (ret) btrfs_abort_transaction(trans, ret); fail: kfree(root_item); trans->block_rsv = NULL; trans->bytes_reserved = 0; btrfs_subvolume_release_metadata(fs_info, &block_rsv); if (async_transid) { *async_transid = trans->transid; err = btrfs_commit_transaction_async(trans, 1); if (err) err = btrfs_commit_transaction(trans); } else { err = btrfs_commit_transaction(trans); } if (err && !ret) ret = err; if (!ret) { inode = btrfs_lookup_dentry(dir, 
dentry); if (IS_ERR(inode)) return PTR_ERR(inode); d_instantiate(dentry, inode); } return ret; fail_free: kfree(root_item); return ret; } static void btrfs_wait_for_no_snapshotting_writes(struct btrfs_root *root) { s64 writers; DEFINE_WAIT(wait); do { prepare_to_wait(&root->subv_writers->wait, &wait, TASK_UNINTERRUPTIBLE); writers = percpu_counter_sum(&root->subv_writers->counter); if (writers) schedule(); finish_wait(&root->subv_writers->wait, &wait); } while (writers); } static int create_snapshot(struct btrfs_root *root, struct inode *dir, struct dentry *dentry, u64 *async_transid, bool readonly, struct btrfs_qgroup_inherit *inherit) { struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); struct inode *inode; struct btrfs_pending_snapshot *pending_snapshot; struct btrfs_trans_handle *trans; int ret; if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state)) return -EINVAL; pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL); if (!pending_snapshot) return -ENOMEM; pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item), GFP_KERNEL); pending_snapshot->path = btrfs_alloc_path(); if (!pending_snapshot->root_item || !pending_snapshot->path) { ret = -ENOMEM; goto free_pending; } atomic_inc(&root->will_be_snapshotted); smp_mb__after_atomic(); btrfs_wait_for_no_snapshotting_writes(root); ret = btrfs_start_delalloc_inodes(root, 0); if (ret) goto dec_and_free; btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1); btrfs_init_block_rsv(&pending_snapshot->block_rsv, BTRFS_BLOCK_RSV_TEMP); /* * 1 - parent dir inode * 2 - dir entries * 1 - root item * 2 - root ref/backref * 1 - root of snapshot * 1 - UUID item */ ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root, &pending_snapshot->block_rsv, 8, &pending_snapshot->qgroup_reserved, false); if (ret) goto dec_and_free; pending_snapshot->dentry = dentry; pending_snapshot->root = root; pending_snapshot->readonly = readonly; pending_snapshot->dir = dir; pending_snapshot->inherit = inherit; trans = 
btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto fail; } spin_lock(&fs_info->trans_lock); list_add(&pending_snapshot->list, &trans->transaction->pending_snapshots); spin_unlock(&fs_info->trans_lock); if (async_transid) { *async_transid = trans->transid; ret = btrfs_commit_transaction_async(trans, 1); if (ret) ret = btrfs_commit_transaction(trans); } else { ret = btrfs_commit_transaction(trans); } if (ret) goto fail; ret = pending_snapshot->error; if (ret) goto fail; ret = btrfs_orphan_cleanup(pending_snapshot->snap); if (ret) goto fail; inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto fail; } d_instantiate(dentry, inode); ret = 0; fail: btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv); dec_and_free: if (atomic_dec_and_test(&root->will_be_snapshotted)) wake_up_atomic_t(&root->will_be_snapshotted); free_pending: kfree(pending_snapshot->root_item); btrfs_free_path(pending_snapshot->path); kfree(pending_snapshot); return ret; } /* copy of may_delete in fs/namei.c() * Check whether we can remove a link victim from directory dir, check * whether the type of victim is right. * 1. We can't do it if dir is read-only (done in permission()) * 2. We should have write and exec permissions on dir * 3. We can't remove anything from append-only dir * 4. We can't do anything with immutable dir (done in permission()) * 5. If the sticky bit on dir is set we should either * a. be owner of dir, or * b. be owner of victim, or * c. have CAP_FOWNER capability * 6. If the victim is append-only or immutable we can't do anything with * links pointing to it. * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR. * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR. * 9. We can't remove a root or mountpoint. * 10. We don't allow removal of NFS sillyrenamed files; it's handled by * nfs_async_unlink(). 
 */
static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
{
	int error;

	if (d_really_is_negative(victim))
		return -ENOENT;

	BUG_ON(d_inode(victim->d_parent) != dir);
	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);

	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;
	if (check_sticky(dir, d_inode(victim)) || IS_APPEND(d_inode(victim)) ||
	    IS_IMMUTABLE(d_inode(victim)) || IS_SWAPFILE(d_inode(victim)))
		return -EPERM;
	if (isdir) {
		if (!d_is_dir(victim))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (d_is_dir(victim))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}

/* copy of may_create in fs/namei.c() */
static inline int btrfs_may_create(struct inode *dir, struct dentry *child)
{
	if (d_really_is_positive(child))
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}

/*
 * Create a new subvolume below @parent. This is largely modeled after
 * sys_mkdirat and vfs_mkdir, but we only do a single component lookup
 * inside this filesystem so it's quite a bit simpler.
 */
static noinline int btrfs_mksubvol(const struct path *parent,
				   const char *name, int namelen,
				   struct btrfs_root *snap_src,
				   u64 *async_transid, bool readonly,
				   struct btrfs_qgroup_inherit *inherit)
{
	struct inode *dir = d_inode(parent->dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct dentry *dentry;
	int error;

	/* take the parent dir lock; only -EINTR (fatal signal) aborts */
	error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
	if (error == -EINTR)
		return error;

	dentry = lookup_one_len(name, parent->dentry, namelen);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

	error = btrfs_may_create(dir, dentry);
	if (error)
		goto out_dput;

	/*
	 * even if this name doesn't exist, we may get hash collisions.
* check for them now when we can safely fail
	 */
	error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
					       dir->i_ino, name, namelen);
	if (error)
		goto out_dput;

	down_read(&fs_info->subvol_sem);

	/* the containing root is going away; nothing to create under it */
	if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0)
		goto out_up_read;

	if (snap_src) {
		error = create_snapshot(snap_src, dir, dentry,
					async_transid, readonly, inherit);
	} else {
		error = create_subvol(dir, dentry, name, namelen,
				      async_transid, inherit);
	}
	if (!error)
		fsnotify_mkdir(dir, dentry);
out_up_read:
	up_read(&fs_info->subvol_sem);
out_dput:
	dput(dentry);
out_unlock:
	inode_unlock(dir);
	return error;
}

/*
 * When we're defragging a range, we don't want to kick it off again
 * if it is really just waiting for delalloc to send it down.
 * If we find a nice big extent or delalloc range for the bytes in the
 * file you want to defrag, we return 0 to let you know to skip this
 * part of the file
 */
static int check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 end;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE);
	read_unlock(&em_tree->lock);

	if (em) {
		end = extent_map_end(em);
		free_extent_map(em);
		/* a big cached extent already covers this offset: skip */
		if (end - offset > thresh)
			return 0;
	}
	/* if we already have a nice delalloc here, just stop */
	thresh /= 2;
	end = count_range_bits(io_tree, &offset, offset + thresh,
			       thresh, EXTENT_DELALLOC, 1);
	if (end >= thresh)
		return 0;
	return 1;
}

/*
 * helper function to walk through a file and find extents
 * newer than a specific transid, and smaller than thresh.
* * This is used by the defragging code to find new and small * extents */ static int find_new_extents(struct btrfs_root *root, struct inode *inode, u64 newer_than, u64 *off, u32 thresh) { struct btrfs_path *path; struct btrfs_key min_key; struct extent_buffer *leaf; struct btrfs_file_extent_item *extent; int type; int ret; u64 ino = btrfs_ino(BTRFS_I(inode)); path = btrfs_alloc_path(); if (!path) return -ENOMEM; min_key.objectid = ino; min_key.type = BTRFS_EXTENT_DATA_KEY; min_key.offset = *off; while (1) { ret = btrfs_search_forward(root, &min_key, path, newer_than); if (ret != 0) goto none; process_slot: if (min_key.objectid != ino) goto none; if (min_key.type != BTRFS_EXTENT_DATA_KEY) goto none; leaf = path->nodes[0]; extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); type = btrfs_file_extent_type(leaf, extent); if (type == BTRFS_FILE_EXTENT_REG && btrfs_file_extent_num_bytes(leaf, extent) < thresh && check_defrag_in_cache(inode, min_key.offset, thresh)) { *off = min_key.offset; btrfs_free_path(path); return 0; } path->slots[0]++; if (path->slots[0] < btrfs_header_nritems(leaf)) { btrfs_item_key_to_cpu(leaf, &min_key, path->slots[0]); goto process_slot; } if (min_key.offset == (u64)-1) goto none; min_key.offset++; btrfs_release_path(path); } none: btrfs_free_path(path); return -ENOENT; } static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start) { struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct extent_map *em; u64 len = PAGE_SIZE; /* * hopefully we have this extent in the tree already, try without * the full extent lock */ read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); read_unlock(&em_tree->lock); if (!em) { struct extent_state *cached = NULL; u64 end = start + len - 1; /* get the big lock and read metadata off disk */ lock_extent_bits(io_tree, start, end, &cached); em = btrfs_get_extent(BTRFS_I(inode), 
NULL, 0, start, len, 0); unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS); if (IS_ERR(em)) return NULL; } return em; } static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em) { struct extent_map *next; bool ret = true; /* this is the last extent */ if (em->start + em->len >= i_size_read(inode)) return false; next = defrag_lookup_extent(inode, em->start + em->len); if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE) ret = false; else if ((em->block_start + em->block_len == next->block_start) && (em->block_len > SZ_128K && next->block_len > SZ_128K)) ret = false; free_extent_map(next); return ret; } static int should_defrag_range(struct inode *inode, u64 start, u32 thresh, u64 *last_len, u64 *skip, u64 *defrag_end, int compress) { struct extent_map *em; int ret = 1; bool next_mergeable = true; bool prev_mergeable = true; /* * make sure that once we start defragging an extent, we keep on * defragging it */ if (start < *defrag_end) return 1; *skip = 0; em = defrag_lookup_extent(inode, start); if (!em) return 0; /* this will cover holes, and inline extents */ if (em->block_start >= EXTENT_MAP_LAST_BYTE) { ret = 0; goto out; } if (!*defrag_end) prev_mergeable = false; next_mergeable = defrag_check_next_extent(inode, em); /* * we hit a real extent, if it is big or the next extent is not a * real extent, don't bother defragging it */ if (!compress && (*last_len == 0 || *last_len >= thresh) && (em->len >= thresh || (!next_mergeable && !prev_mergeable))) ret = 0; out: /* * last_len ends up being a counter of how many bytes we've defragged. * every time we choose not to defrag an extent, we reset *last_len * so that the next tiny extent will force a defrag. * * The end result of this is that tiny extents before a single big * extent will force at least part of that big extent to be defragged. 
*/ if (ret) { *defrag_end = extent_map_end(em); } else { *last_len = 0; *skip = extent_map_end(em); *defrag_end = 0; } free_extent_map(em); return ret; } /* * it doesn't do much good to defrag one or two pages * at a time. This pulls in a nice chunk of pages * to COW and defrag. * * It also makes sure the delalloc code has enough * dirty data to avoid making new small extents as part * of the defrag * * It's a good idea to start RA on this range * before calling this. */ static int cluster_pages_for_defrag(struct inode *inode, struct page **pages, unsigned long start_index, unsigned long num_pages) { unsigned long file_end; u64 isize = i_size_read(inode); u64 page_start; u64 page_end; u64 page_cnt; int ret; int i; int i_done; struct btrfs_ordered_extent *ordered; struct extent_state *cached_state = NULL; struct extent_io_tree *tree; struct extent_changeset *data_reserved = NULL; gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); file_end = (isize - 1) >> PAGE_SHIFT; if (!isize || start_index > file_end) return 0; page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1); ret = btrfs_delalloc_reserve_space(inode, &data_reserved, start_index << PAGE_SHIFT, page_cnt << PAGE_SHIFT); if (ret) return ret; i_done = 0; tree = &BTRFS_I(inode)->io_tree; /* step one, lock all the pages */ for (i = 0; i < page_cnt; i++) { struct page *page; again: page = find_or_create_page(inode->i_mapping, start_index + i, mask); if (!page) break; page_start = page_offset(page); page_end = page_start + PAGE_SIZE - 1; while (1) { lock_extent_bits(tree, page_start, page_end, &cached_state); ordered = btrfs_lookup_ordered_extent(inode, page_start); unlock_extent_cached(tree, page_start, page_end, &cached_state, GFP_NOFS); if (!ordered) break; unlock_page(page); btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); lock_page(page); /* * we unlocked the page above, so we need check if * it was released or not. 
*/ if (page->mapping != inode->i_mapping) { unlock_page(page); put_page(page); goto again; } } if (!PageUptodate(page)) { btrfs_readpage(NULL, page); lock_page(page); if (!PageUptodate(page)) { unlock_page(page); put_page(page); ret = -EIO; break; } } if (page->mapping != inode->i_mapping) { unlock_page(page); put_page(page); goto again; } pages[i] = page; i_done++; } if (!i_done || ret) goto out; if (!(inode->i_sb->s_flags & MS_ACTIVE)) goto out; /* * so now we have a nice long stream of locked * and up to date pages, lets wait on them */ for (i = 0; i < i_done; i++) wait_on_page_writeback(pages[i]); page_start = page_offset(pages[0]); page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE; lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, &cached_state); clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS); if (i_done != page_cnt) { spin_lock(&BTRFS_I(inode)->lock); BTRFS_I(inode)->outstanding_extents++; spin_unlock(&BTRFS_I(inode)->lock); btrfs_delalloc_release_space(inode, data_reserved, start_index << PAGE_SHIFT, (page_cnt - i_done) << PAGE_SHIFT); } set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, &cached_state); unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, &cached_state, GFP_NOFS); for (i = 0; i < i_done; i++) { clear_page_dirty_for_io(pages[i]); ClearPageChecked(pages[i]); set_page_extent_mapped(pages[i]); set_page_dirty(pages[i]); unlock_page(pages[i]); put_page(pages[i]); } extent_changeset_free(data_reserved); return i_done; out: for (i = 0; i < i_done; i++) { unlock_page(pages[i]); put_page(pages[i]); } btrfs_delalloc_release_space(inode, data_reserved, start_index << PAGE_SHIFT, page_cnt << PAGE_SHIFT); extent_changeset_free(data_reserved); return ret; } int btrfs_defrag_file(struct inode *inode, struct file *file, struct btrfs_ioctl_defrag_range_args *range, u64 
newer_than, unsigned long max_to_defrag) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; struct file_ra_state *ra = NULL; unsigned long last_index; u64 isize = i_size_read(inode); u64 last_len = 0; u64 skip = 0; u64 defrag_end = 0; u64 newer_off = range->start; unsigned long i; unsigned long ra_index = 0; int ret; int defrag_count = 0; int compress_type = BTRFS_COMPRESS_ZLIB; u32 extent_thresh = range->extent_thresh; unsigned long max_cluster = SZ_256K >> PAGE_SHIFT; unsigned long cluster = max_cluster; u64 new_align = ~((u64)SZ_128K - 1); struct page **pages = NULL; bool do_compress = range->flags & BTRFS_DEFRAG_RANGE_COMPRESS; if (isize == 0) return 0; if (range->start >= isize) return -EINVAL; if (do_compress) { if (range->compress_type > BTRFS_COMPRESS_TYPES) return -EINVAL; if (range->compress_type) compress_type = range->compress_type; } if (extent_thresh == 0) extent_thresh = SZ_256K; /* * If we were not given a file, allocate a readahead context. As * readahead is just an optimization, defrag will work without it so * we don't error out. 
	 */
	/* (continuation of btrfs_defrag_file(); the head is above this chunk) */
	if (!file) {
		/* kernel-internal caller: allocate a private readahead state */
		ra = kzalloc(sizeof(*ra), GFP_KERNEL);
		if (ra)
			file_ra_state_init(ra, inode->i_mapping);
	} else {
		ra = &file->f_ra;
	}

	pages = kmalloc_array(max_cluster, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_ra;
	}

	/* find the last page to defrag */
	if (range->start + range->len > range->start) {
		last_index = min_t(u64, isize - 1,
			 range->start + range->len - 1) >> PAGE_SHIFT;
	} else {
		/* range->start + range->len overflowed: defrag to EOF */
		last_index = (isize - 1) >> PAGE_SHIFT;
	}

	if (newer_than) {
		ret = find_new_extents(root, inode, newer_than,
				       &newer_off, SZ_64K);
		if (!ret) {
			range->start = newer_off;
			/*
			 * we always align our defrag to help keep
			 * the extents in the file evenly spaced
			 */
			i = (newer_off & new_align) >> PAGE_SHIFT;
		} else
			goto out_ra;
	} else {
		i = range->start >> PAGE_SHIFT;
	}
	if (!max_to_defrag)
		max_to_defrag = last_index - i + 1;

	/*
	 * make writeback starts from i, so the defrag range can be
	 * written sequentially.
	 */
	if (i < inode->i_mapping->writeback_index)
		inode->i_mapping->writeback_index = i;

	while (i <= last_index && defrag_count < max_to_defrag &&
	       (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) {
		/*
		 * make sure we stop running if someone unmounts
		 * the FS
		 */
		if (!(inode->i_sb->s_flags & MS_ACTIVE))
			break;

		if (btrfs_defrag_cancelled(fs_info)) {
			btrfs_debug(fs_info, "defrag_file cancelled");
			ret = -EAGAIN;
			break;
		}

		if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
					 extent_thresh, &last_len, &skip,
					 &defrag_end, do_compress)){
			unsigned long next;
			/*
			 * the should_defrag function tells us how much to skip
			 * bump our counter by the suggested amount
			 */
			next = DIV_ROUND_UP(skip, PAGE_SIZE);
			i = max(i + 1, next);
			continue;
		}

		if (!newer_than) {
			cluster = (PAGE_ALIGN(defrag_end) >>
				   PAGE_SHIFT) - i;
			cluster = min(cluster, max_cluster);
		} else {
			cluster = max_cluster;
		}

		if (i + cluster > ra_index) {
			ra_index = max(i, ra_index);
			if (ra)
				page_cache_sync_readahead(inode->i_mapping, ra,
						file, ra_index, cluster);
			ra_index += cluster;
		}

		inode_lock(inode);
		if (do_compress)
			BTRFS_I(inode)->defrag_compress = compress_type;
		ret = cluster_pages_for_defrag(inode, pages, i, cluster);
		if (ret < 0) {
			inode_unlock(inode);
			goto out_ra;
		}

		defrag_count += ret;
		balance_dirty_pages_ratelimited(inode->i_mapping);
		inode_unlock(inode);

		if (newer_than) {
			if (newer_off == (u64)-1)
				break;

			if (ret > 0)
				i += ret;

			newer_off = max(newer_off + 1,
					(u64)i << PAGE_SHIFT);

			ret = find_new_extents(root, inode, newer_than,
					       &newer_off, SZ_64K);
			if (!ret) {
				range->start = newer_off;
				i = (newer_off & new_align) >> PAGE_SHIFT;
			} else {
				break;
			}
		} else {
			if (ret > 0) {
				i += ret;
				last_len += ret << PAGE_SHIFT;
			} else {
				i++;
				last_len = 0;
			}
		}
	}

	if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) {
		filemap_flush(inode->i_mapping);
		if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
			filemap_flush(inode->i_mapping);
	}

	if (do_compress) {
		/* the filemap_flush will queue IO into the worker threads, but
		 * we have to make sure the IO is actually started and that
		 * ordered extents get created before we return
		 */
		atomic_inc(&fs_info->async_submit_draining);
		while (atomic_read(&fs_info->nr_async_submits) ||
		       atomic_read(&fs_info->async_delalloc_pages)) {
			wait_event(fs_info->async_submit_wait,
				   (atomic_read(&fs_info->nr_async_submits) == 0 &&
				    atomic_read(&fs_info->async_delalloc_pages) == 0));
		}
		atomic_dec(&fs_info->async_submit_draining);
	}

	/* persist the incompat bit for the compression type we just used */
	if (range->compress_type == BTRFS_COMPRESS_LZO) {
		btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
	} else if (range->compress_type == BTRFS_COMPRESS_ZSTD) {
		btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
	}

	ret = defrag_count;

out_ra:
	if (do_compress) {
		inode_lock(inode);
		BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
		inode_unlock(inode);
	}
	if (!file)
		kfree(ra);
	kfree(pages);
	return ret;
}

/*
 * Resize a device (BTRFS_IOC_RESIZE): vol_args->name is "[devid:]size",
 * where size may be "max", or be prefixed with '+'/'-' for a relative
 * change.  Serialized against other exclusive device operations via
 * BTRFS_FS_EXCL_OP and volume_mutex.
 */
static noinline int btrfs_ioctl_resize(struct file *file,
					void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 new_size;
	u64 old_size;
	u64 devid = 1;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ioctl_vol_args *vol_args;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device = NULL;
	char *sizestr;
	char *retptr;
	char *devstr = NULL;
	int ret = 0;
	int mod = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;

	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
		mnt_drop_write_file(file);
		return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
	}

	mutex_lock(&fs_info->volume_mutex);
	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args)) {
		ret = PTR_ERR(vol_args);
		goto out;
	}
	/* guarantee NUL termination of the user-supplied string */
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';

	sizestr = vol_args->name;
	devstr = strchr(sizestr, ':');
	if (devstr) {
		/* "devid:size" form: split at the colon */
		sizestr = devstr + 1;
		*devstr = '\0';
		devstr = vol_args->name;
		ret = kstrtoull(devstr, 10, &devid);
		if (ret)
			goto out_free;
		if (!devid) {
			ret = -EINVAL;
			goto out_free;
		}
		btrfs_info(fs_info, "resizing devid %llu", devid);
	}

	device = btrfs_find_device(fs_info, devid, NULL, NULL);
	if (!device) {
		btrfs_info(fs_info, "resizer unable to find device %llu",
			   devid);
		ret = -ENODEV;
		goto out_free;
	}

	if (!device->writeable) {
		btrfs_info(fs_info,
			   "resizer unable to apply on readonly device %llu",
		       devid);
		ret = -EPERM;
		goto out_free;
	}

	if (!strcmp(sizestr, "max"))
		new_size = device->bdev->bd_inode->i_size;
	else {
		if (sizestr[0] == '-') {
			mod = -1;
			sizestr++;
		} else if (sizestr[0] == '+') {
			mod = 1;
			sizestr++;
		}
		new_size = memparse(sizestr, &retptr);
		if (*retptr != '\0' || new_size == 0) {
			ret = -EINVAL;
			goto out_free;
		}
	}

	if (device->is_tgtdev_for_dev_replace) {
		ret = -EPERM;
		goto out_free;
	}

	old_size = btrfs_device_get_total_bytes(device);

	if (mod < 0) {
		if (new_size > old_size) {
			ret = -EINVAL;
			goto out_free;
		}
		new_size = old_size - new_size;
	} else if (mod > 0) {
		/* reject overflow on "+size" */
		if (new_size > ULLONG_MAX - old_size) {
			ret = -ERANGE;
			goto out_free;
		}
		new_size = old_size + new_size;
	}

	if (new_size < SZ_256M) {
		ret = -EINVAL;
		goto out_free;
	}
	if (new_size > device->bdev->bd_inode->i_size) {
		ret = -EFBIG;
		goto out_free;
	}

	new_size = round_down(new_size, fs_info->sectorsize);

	btrfs_info_in_rcu(fs_info, "new size for %s is %llu",
			  rcu_str_deref(device->name), new_size);

	if (new_size > old_size) {
		/* growing needs a transaction; shrinking manages its own */
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out_free;
		}
		ret = btrfs_grow_device(trans, device, new_size);
		btrfs_commit_transaction(trans);
	} else if (new_size < old_size) {
		ret = btrfs_shrink_device(device, new_size);
	} /* equal, nothing need to do */

out_free:
	kfree(vol_args);
out:
	mutex_unlock(&fs_info->volume_mutex);
	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
	mnt_drop_write_file(file);
	return ret;
}

/*
 * Common worker for subvolume/snapshot creation ioctls.  'subvol'
 * selects plain subvolume creation vs. snapshotting the subvolume that
 * backs file descriptor 'fd'.  (continues below)
 */
static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
				const char *name, unsigned long fd, int subvol,
				u64 *transid, bool readonly,
				struct btrfs_qgroup_inherit *inherit)
{
	int namelen;
	int ret = 0;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	namelen = strlen(name);
	if (strchr(name, '/')) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (name[0] == '.' &&
	   (namelen == 1 || (name[1] == '.'
	    && namelen == 2))) {
		/* refuse "." and ".." as subvolume names */
		ret = -EEXIST;
		goto out_drop_write;
	}

	if (subvol) {
		ret = btrfs_mksubvol(&file->f_path, name, namelen,
				     NULL, transid, readonly, inherit);
	} else {
		struct fd src = fdget(fd);
		struct inode *src_inode;
		if (!src.file) {
			ret = -EINVAL;
			goto out_drop_write;
		}

		src_inode = file_inode(src.file);
		if (src_inode->i_sb != file_inode(file)->i_sb) {
			btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
				   "Snapshot src from another FS");
			ret = -EXDEV;
		} else if (!inode_owner_or_capable(src_inode)) {
			/*
			 * Subvolume creation is not restricted, but snapshots
			 * are limited to own subvolumes only
			 */
			ret = -EPERM;
		} else {
			ret = btrfs_mksubvol(&file->f_path, name, namelen,
					     BTRFS_I(src_inode)->root,
					     transid, readonly, inherit);
		}
		fdput(src);
	}
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}

/*
 * v1 subvolume/snapshot creation ioctl: thin wrapper that copies the
 * vol_args from userspace and forwards to the transid worker with no
 * async/readonly/qgroup options.
 */
static noinline int btrfs_ioctl_snap_create(struct file *file,
					    void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args *vol_args;
	int ret;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';

	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
					      vol_args->fd, subvol,
					      NULL, false, NULL);

	kfree(vol_args);
	return ret;
}

/*
 * v2 subvolume/snapshot creation ioctl: adds async creation (returns
 * the transaction id), read-only snapshots and qgroup inheritance.
 */
static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
					       void __user *arg, int subvol)
{
	struct btrfs_ioctl_vol_args_v2 *vol_args;
	int ret;
	u64 transid = 0;
	u64 *ptr = NULL;
	bool readonly = false;
	struct btrfs_qgroup_inherit *inherit = NULL;

	if (!S_ISDIR(file_inode(file)->i_mode))
		return -ENOTDIR;

	vol_args = memdup_user(arg, sizeof(*vol_args));
	if (IS_ERR(vol_args))
		return PTR_ERR(vol_args);
	vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';

	if (vol_args->flags &
	    ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY |
	      BTRFS_SUBVOL_QGROUP_INHERIT)) {
		ret = -EOPNOTSUPP;
		goto free_args;
	}

	if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC)
		ptr = &transid;
	if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
		readonly = true;
	if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
		/* user-controlled size: bound it before memdup_user */
		if (vol_args->size > PAGE_SIZE) {
			ret = -EINVAL;
			goto free_args;
		}
		inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size);
		if (IS_ERR(inherit)) {
			ret = PTR_ERR(inherit);
			goto free_args;
		}
	}

	ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
					      vol_args->fd, subvol, ptr,
					      readonly, inherit);
	if (ret)
		goto free_inherit;

	/* async creation: hand the transaction id back to userspace */
	if (ptr && copy_to_user(arg +
				offsetof(struct btrfs_ioctl_vol_args_v2,
					transid),
				ptr, sizeof(*ptr)))
		ret = -EFAULT;

free_inherit:
	kfree(inherit);
free_args:
	kfree(vol_args);
	return ret;
}

/*
 * Report the subvolume flags (currently only BTRFS_SUBVOL_RDONLY) of
 * the subvolume root this file belongs to.
 */
static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
						void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	u64 flags = 0;

	/* only valid on the root directory of a subvolume */
	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID)
		return -EINVAL;

	down_read(&fs_info->subvol_sem);
	if (btrfs_root_readonly(root))
		flags |= BTRFS_SUBVOL_RDONLY;
	up_read(&fs_info->subvol_sem);

	if (copy_to_user(arg, &flags, sizeof(flags)))
		ret = -EFAULT;

	return ret;
}

/*
 * Toggle the read-only flag of a subvolume.  Only BTRFS_SUBVOL_RDONLY
 * may be changed; the RO->RW transition is refused while the subvolume
 * is a send source.
 */
static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
					      void __user *arg)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 root_flags;
	u64 flags;
	int ret = 0;

	if (!inode_owner_or_capable(inode))
		return -EPERM;

	ret = mnt_want_write_file(file);
	if (ret)
		goto out;

	if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (copy_from_user(&flags, arg, sizeof(flags))) {
		ret = -EFAULT;
		goto out_drop_write;
	}

	if (flags & BTRFS_SUBVOL_CREATE_ASYNC) {
		ret = -EINVAL;
		goto out_drop_write;
	}

	if (flags & ~BTRFS_SUBVOL_RDONLY) {
		ret = -EOPNOTSUPP;
		goto out_drop_write;
	}

	down_write(&fs_info->subvol_sem);

	/* nothing to do */
	if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root))
		goto out_drop_sem;

	root_flags = btrfs_root_flags(&root->root_item);
	if (flags & BTRFS_SUBVOL_RDONLY) {
		btrfs_set_root_flags(&root->root_item,
				     root_flags | BTRFS_ROOT_SUBVOL_RDONLY);
	} else {
		/*
		 * Block RO -> RW transition if this subvolume is involved in
		 * send
		 */
		spin_lock(&root->root_item_lock);
		if (root->send_in_progress == 0) {
			btrfs_set_root_flags(&root->root_item,
				     root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY);
			spin_unlock(&root->root_item_lock);
		} else {
			spin_unlock(&root->root_item_lock);
			btrfs_warn(fs_info,
				   "Attempt to set subvolume %llu read-write during send",
				   root->root_key.objectid);
			ret = -EPERM;
			goto out_drop_sem;
		}
	}

	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_reset;
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&root->root_key, &root->root_item);
	btrfs_commit_transaction(trans);

out_reset:
	/* restore the in-memory flags if the update failed */
	if (ret)
		btrfs_set_root_flags(&root->root_item, root_flags);
out_drop_sem:
	up_write(&fs_info->subvol_sem);
out_drop_write:
	mnt_drop_write_file(file);
out:
	return ret;
}

/*
 * helper to check if the subvolume references other subvolumes
 */
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 dir_id;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Make sure this root isn't set as the default subvol */
	dir_id = btrfs_super_root_dir(fs_info->super_copy);
	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
				   dir_id, "default", 7, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
		if (key.objectid == root->root_key.objectid) {
			ret = -EPERM;
			btrfs_err(fs_info,
				  "deleting default subvolume %llu is not allowed",
				  key.objectid);
			goto out;
		}
		btrfs_release_path(path);
	}

	key.objectid = root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* offset (u64)-1 can never be an existing key */
	BUG_ON(ret == 0);

	ret = 0;
	if (path->slots[0] > 0) {
		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		/* a ROOT_REF for this root means it has child subvolumes */
		if (key.objectid == root->root_key.objectid &&
		    key.type == BTRFS_ROOT_REF_KEY)
			ret = -ENOTEMPTY;
	}
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Return 1 if 'key' falls inside the [min,max] key range of the search
 * ioctl argument, 0 otherwise.
 */
static noinline int key_in_sk(struct btrfs_key *key,
			      struct btrfs_ioctl_search_key *sk)
{
	struct btrfs_key test;
	int ret;

	test.objectid = sk->min_objectid;
	test.type = sk->min_type;
	test.offset = sk->min_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret < 0)
		return 0;

	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret > 0)
		return 0;
	return 1;
}

/*
 * Copy matching items of the current leaf into the userspace buffer
 * and advance 'key' for the next btrfs_search_forward() call.
 */
static noinline int copy_to_sk(struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct btrfs_ioctl_search_key *sk,
			       size_t *buf_size,
			       char __user *ubuf,
			       unsigned long *sk_offset,
			       int *num_found)
{
	u64 found_transid;
	struct extent_buffer *leaf;
	struct btrfs_ioctl_search_header sh;
	struct btrfs_key test;
	unsigned long item_off;
	unsigned long item_len;
	int nritems;
	int i;
	int slot;
	int ret = 0;

	leaf = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(leaf);

	if (btrfs_header_generation(leaf) > sk->max_transid) {
		i = nritems;
		goto advance_key;
	}
	found_transid = btrfs_header_generation(leaf);

	for (i = slot; i < nritems; i++) {
		item_off = btrfs_item_ptr_offset(leaf, i);
		item_len = btrfs_item_size_nr(leaf, i);

		btrfs_item_key_to_cpu(leaf, key, i);
		if (!key_in_sk(key, sk))
			continue;

		if (sizeof(sh) + item_len > *buf_size) {
			if (*num_found) {
				ret = 1;
				goto out;
			}

			/*
			 * return one empty item back for v1, which does not
			 * handle -EOVERFLOW
			 */

			*buf_size = sizeof(sh) + item_len;
			item_len = 0;
			ret = -EOVERFLOW;
		}

		if (sizeof(sh) + item_len + *sk_offset > *buf_size) {
			ret = 1;
			goto out;
		}

		sh.objectid = key->objectid;
		sh.offset = key->offset;
		sh.type = key->type;
		sh.len = item_len;
		sh.transid = found_transid;

		/* copy search result header */
		if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
			ret = -EFAULT;
			goto out;
		}

		*sk_offset += sizeof(sh);

		if (item_len) {
			char __user *up = ubuf + *sk_offset;
			/* copy the item */
			if (read_extent_buffer_to_user(leaf, up,
						       item_off, item_len)) {
				ret = -EFAULT;
				goto out;
			}

			*sk_offset += item_len;
		}
		(*num_found)++;

		if (ret) /* -EOVERFLOW from above */
			goto out;

		if (*num_found >= sk->nr_items) {
			ret = 1;
			goto out;
		}
	}
advance_key:
	ret = 0;
	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;
	/* advance key one step, stopping once we pass the max key */
	if (btrfs_comp_cpu_keys(key, &test) >= 0)
		ret = 1;
	else if (key->offset < (u64)-1)
		key->offset++;
	else if (key->type < (u8)-1) {
		key->offset = 0;
		key->type++;
	} else if (key->objectid < (u64)-1) {
		key->offset = 0;
		key->type = 0;
		key->objectid++;
	} else
		ret = 1;
out:
	/*
	 *  0: all items from this leaf copied, continue with next
	 *  1: * more items can be copied, but unused buffer is too small
	 *     * all items were found
	 *     Either way, it will stops the loop which iterates to the next
	 *     leaf
	 *  -EOVERFLOW: item was to large for buffer
	 *  -EFAULT: could not copy extent buffer back to userspace
	 */
	return ret;
}

/*
 * Core of the TREE_SEARCH ioctls: walk the requested tree forward from
 * the minimum key, copying matching items to 'ubuf'.  (continues below)
 */
static noinline int search_ioctl(struct inode *inode,
				 struct btrfs_ioctl_search_key *sk,
				 size_t *buf_size,
				 char __user *ubuf)
{
	struct btrfs_fs_info *info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root;
	struct btrfs_key key;
	struct btrfs_path *path;
	int ret;
	int num_found = 0;
	unsigned long sk_offset = 0;

	if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) {
		*buf_size = sizeof(struct btrfs_ioctl_search_header);
		return -EOVERFLOW;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (sk->tree_id == 0) {
		/* search the root of the inode that was passed */
		root = BTRFS_I(inode)->root;
	} else {
		key.objectid = sk->tree_id;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;
		root = btrfs_read_fs_root_no_name(info, &key);
		if (IS_ERR(root)) {
			btrfs_free_path(path);
			return -ENOENT;
		}
	}

	key.objectid = sk->min_objectid;
	key.type = sk->min_type;
key.offset = sk->min_offset; while (1) { ret = btrfs_search_forward(root, &key, path, sk->min_transid); if (ret != 0) { if (ret > 0) ret = 0; goto err; } ret = copy_to_sk(path, &key, sk, buf_size, ubuf, &sk_offset, &num_found); btrfs_release_path(path); if (ret) break; } if (ret > 0) ret = 0; err: sk->nr_items = num_found; btrfs_free_path(path); return ret; } static noinline int btrfs_ioctl_tree_search(struct file *file, void __user *argp) { struct btrfs_ioctl_search_args __user *uargs; struct btrfs_ioctl_search_key sk; struct inode *inode; int ret; size_t buf_size; if (!capable(CAP_SYS_ADMIN)) return -EPERM; uargs = (struct btrfs_ioctl_search_args __user *)argp; if (copy_from_user(&sk, &uargs->key, sizeof(sk))) return -EFAULT; buf_size = sizeof(uargs->buf); inode = file_inode(file); ret = search_ioctl(inode, &sk, &buf_size, uargs->buf); /* * In the origin implementation an overflow is handled by returning a * search header with a len of zero, so reset ret. */ if (ret == -EOVERFLOW) ret = 0; if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk))) ret = -EFAULT; return ret; } static noinline int btrfs_ioctl_tree_search_v2(struct file *file, void __user *argp) { struct btrfs_ioctl_search_args_v2 __user *uarg; struct btrfs_ioctl_search_args_v2 args; struct inode *inode; int ret; size_t buf_size; const size_t buf_limit = SZ_16M; if (!capable(CAP_SYS_ADMIN)) return -EPERM; /* copy search header and buffer size */ uarg = (struct btrfs_ioctl_search_args_v2 __user *)argp; if (copy_from_user(&args, uarg, sizeof(args))) return -EFAULT; buf_size = args.buf_size; /* limit result size to 16MB */ if (buf_size > buf_limit) buf_size = buf_limit; inode = file_inode(file); ret = search_ioctl(inode, &args.key, &buf_size, (char *)(&uarg->buf[0])); if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key))) ret = -EFAULT; else if (ret == -EOVERFLOW && copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size))) ret = -EFAULT; return ret; } /* * Search INODE_REFs to 
identify path name of 'dirid' directory * in a 'tree_id' tree. and sets path name to 'name'. */ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, u64 tree_id, u64 dirid, char *name) { struct btrfs_root *root; struct btrfs_key key; char *ptr; int ret = -1; int slot; int len; int total_len = 0; struct btrfs_inode_ref *iref; struct extent_buffer *l; struct btrfs_path *path; if (dirid == BTRFS_FIRST_FREE_OBJECTID) { name[0]='\0'; return 0; } path = btrfs_alloc_path(); if (!path) return -ENOMEM; ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX]; key.objectid = tree_id; key.type = BTRFS_ROOT_ITEM_KEY; key.offset = (u64)-1; root = btrfs_read_fs_root_no_name(info, &key); if (IS_ERR(root)) { btrfs_err(info, "could not find root %llu", tree_id); ret = -ENOENT; goto out; } key.objectid = dirid; key.type = BTRFS_INODE_REF_KEY; key.offset = (u64)-1; while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; else if (ret > 0) { ret = btrfs_previous_item(root, path, dirid, BTRFS_INODE_REF_KEY); if (ret < 0) goto out; else if (ret > 0) { ret = -ENOENT; goto out; } } l = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(l, &key, slot); iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref); len = btrfs_inode_ref_name_len(l, iref); ptr -= len + 1; total_len += len + 1; if (ptr < name) { ret = -ENAMETOOLONG; goto out; } *(ptr + len) = '/'; read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len); if (key.offset == BTRFS_FIRST_FREE_OBJECTID) break; btrfs_release_path(path); key.objectid = key.offset; key.offset = (u64)-1; dirid = key.objectid; } memmove(name, ptr, total_len); name[total_len] = '\0'; ret = 0; out: btrfs_free_path(path); return ret; } static noinline int btrfs_ioctl_ino_lookup(struct file *file, void __user *argp) { struct btrfs_ioctl_ino_lookup_args *args; struct inode *inode; int ret = 0; args = memdup_user(argp, sizeof(*args)); if (IS_ERR(args)) return PTR_ERR(args); inode = file_inode(file); /* * Unprivileged 
query to obtain the containing subvolume root id. The * path is reset so it's consistent with btrfs_search_path_in_tree. */ if (args->treeid == 0) args->treeid = BTRFS_I(inode)->root->root_key.objectid; if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) { args->name[0] = 0; goto out; } if (!capable(CAP_SYS_ADMIN)) { ret = -EPERM; goto out; } ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info, args->treeid, args->objectid, args->name); out: if (ret == 0 && copy_to_user(argp, args, sizeof(*args))) ret = -EFAULT; kfree(args); return ret; } static noinline int btrfs_ioctl_snap_destroy(struct file *file, void __user *arg) { struct dentry *parent = file->f_path.dentry; struct btrfs_fs_info *fs_info = btrfs_sb(parent->d_sb); struct dentry *dentry; struct inode *dir = d_inode(parent); struct inode *inode; struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_root *dest = NULL; struct btrfs_ioctl_vol_args *vol_args; struct btrfs_trans_handle *trans; struct btrfs_block_rsv block_rsv; u64 root_flags; u64 qgroup_reserved; int namelen; int ret; int err = 0; if (!S_ISDIR(dir->i_mode)) return -ENOTDIR; vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) return PTR_ERR(vol_args); vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; namelen = strlen(vol_args->name); if (strchr(vol_args->name, '/') || strncmp(vol_args->name, "..", namelen) == 0) { err = -EINVAL; goto out; } err = mnt_want_write_file(file); if (err) goto out; err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT); if (err == -EINTR) goto out_drop_write; dentry = lookup_one_len(vol_args->name, parent, namelen); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); goto out_unlock_dir; } if (d_really_is_negative(dentry)) { err = -ENOENT; goto out_dput; } inode = d_inode(dentry); dest = BTRFS_I(inode)->root; if (!capable(CAP_SYS_ADMIN)) { /* * Regular user. 
Only allow this with a special mount * option, when the user has write+exec access to the * subvol root, and when rmdir(2) would have been * allowed. * * Note that this is _not_ check that the subvol is * empty or doesn't contain data that we wouldn't * otherwise be able to delete. * * Users who want to delete empty subvols should try * rmdir(2). */ err = -EPERM; if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED)) goto out_dput; /* * Do not allow deletion if the parent dir is the same * as the dir to be deleted. That means the ioctl * must be called on the dentry referencing the root * of the subvol, not a random directory contained * within it. */ err = -EINVAL; if (root == dest) goto out_dput; err = inode_permission(inode, MAY_WRITE | MAY_EXEC); if (err) goto out_dput; } /* check if subvolume may be deleted by a user */ err = btrfs_may_delete(dir, dentry, 1); if (err) goto out_dput; if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) { err = -EINVAL; goto out_dput; } inode_lock(inode); /* * Don't allow to delete a subvolume with send in progress. This is * inside the i_mutex so the error handling that has to drop the bit * again is not run concurrently. */ spin_lock(&dest->root_item_lock); root_flags = btrfs_root_flags(&dest->root_item); if (dest->send_in_progress == 0) { btrfs_set_root_flags(&dest->root_item, root_flags | BTRFS_ROOT_SUBVOL_DEAD); spin_unlock(&dest->root_item_lock); } else { spin_unlock(&dest->root_item_lock); btrfs_warn(fs_info, "Attempt to delete subvolume %llu during send", dest->root_key.objectid); err = -EPERM; goto out_unlock_inode; } down_write(&fs_info->subvol_sem); err = may_destroy_subvol(dest); if (err) goto out_up_write; btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); /* * One for dir inode, two for dir entries, two for root * ref/backref. 
*/ err = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, &qgroup_reserved, true); if (err) goto out_up_write; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { err = PTR_ERR(trans); goto out_release; } trans->block_rsv = &block_rsv; trans->bytes_reserved = block_rsv.size; btrfs_record_snapshot_destroy(trans, BTRFS_I(dir)); ret = btrfs_unlink_subvol(trans, root, dir, dest->root_key.objectid, dentry->d_name.name, dentry->d_name.len); if (ret) { err = ret; btrfs_abort_transaction(trans, ret); goto out_end_trans; } btrfs_record_root_in_trans(trans, dest); memset(&dest->root_item.drop_progress, 0, sizeof(dest->root_item.drop_progress)); dest->root_item.drop_level = 0; btrfs_set_root_refs(&dest->root_item, 0); if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) { ret = btrfs_insert_orphan_item(trans, fs_info->tree_root, dest->root_key.objectid); if (ret) { btrfs_abort_transaction(trans, ret); err = ret; goto out_end_trans; } } ret = btrfs_uuid_tree_rem(trans, fs_info, dest->root_item.uuid, BTRFS_UUID_KEY_SUBVOL, dest->root_key.objectid); if (ret && ret != -ENOENT) { btrfs_abort_transaction(trans, ret); err = ret; goto out_end_trans; } if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) { ret = btrfs_uuid_tree_rem(trans, fs_info, dest->root_item.received_uuid, BTRFS_UUID_KEY_RECEIVED_SUBVOL, dest->root_key.objectid); if (ret && ret != -ENOENT) { btrfs_abort_transaction(trans, ret); err = ret; goto out_end_trans; } } out_end_trans: trans->block_rsv = NULL; trans->bytes_reserved = 0; ret = btrfs_end_transaction(trans); if (ret && !err) err = ret; inode->i_flags |= S_DEAD; out_release: btrfs_subvolume_release_metadata(fs_info, &block_rsv); out_up_write: up_write(&fs_info->subvol_sem); if (err) { spin_lock(&dest->root_item_lock); root_flags = btrfs_root_flags(&dest->root_item); btrfs_set_root_flags(&dest->root_item, root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); spin_unlock(&dest->root_item_lock); } out_unlock_inode: inode_unlock(inode); if 
(!err) { d_invalidate(dentry); btrfs_invalidate_inodes(dest); d_delete(dentry); ASSERT(dest->send_in_progress == 0); /* the last ref */ if (dest->ino_cache_inode) { iput(dest->ino_cache_inode); dest->ino_cache_inode = NULL; } } out_dput: dput(dentry); out_unlock_dir: inode_unlock(dir); out_drop_write: mnt_drop_write_file(file); out: kfree(vol_args); return err; } static int btrfs_ioctl_defrag(struct file *file, void __user *argp) { struct inode *inode = file_inode(file); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_ioctl_defrag_range_args *range; int ret; ret = mnt_want_write_file(file); if (ret) return ret; if (btrfs_root_readonly(root)) { ret = -EROFS; goto out; } switch (inode->i_mode & S_IFMT) { case S_IFDIR: if (!capable(CAP_SYS_ADMIN)) { ret = -EPERM; goto out; } ret = btrfs_defrag_root(root); break; case S_IFREG: if (!(file->f_mode & FMODE_WRITE)) { ret = -EINVAL; goto out; } range = kzalloc(sizeof(*range), GFP_KERNEL); if (!range) { ret = -ENOMEM; goto out; } if (argp) { if (copy_from_user(range, argp, sizeof(*range))) { ret = -EFAULT; kfree(range); goto out; } /* compression requires us to start the IO */ if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) { range->flags |= BTRFS_DEFRAG_RANGE_START_IO; range->extent_thresh = (u32)-1; } } else { /* the rest are all set to zero by kzalloc */ range->len = (u64)-1; } ret = btrfs_defrag_file(file_inode(file), file, range, 0, 0); if (ret > 0) ret = 0; kfree(range); break; default: ret = -EINVAL; } out: mnt_drop_write_file(file); return ret; } static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_vol_args *vol_args; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; mutex_lock(&fs_info->volume_mutex); vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) { ret = PTR_ERR(vol_args); goto out; } vol_args->name[BTRFS_PATH_NAME_MAX] = 
'\0'; ret = btrfs_init_new_device(fs_info, vol_args->name); if (!ret) btrfs_info(fs_info, "disk added %s", vol_args->name); kfree(vol_args); out: mutex_unlock(&fs_info->volume_mutex); clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); return ret; } static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_ioctl_vol_args_v2 *vol_args; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(file); if (ret) return ret; vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) { ret = PTR_ERR(vol_args); goto err_drop; } /* Check for compatibility reject unknown flags */ if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) return -EOPNOTSUPP; if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) { ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; goto out; } mutex_lock(&fs_info->volume_mutex); if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) { ret = btrfs_rm_device(fs_info, NULL, vol_args->devid); } else { vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0'; ret = btrfs_rm_device(fs_info, vol_args->name, 0); } mutex_unlock(&fs_info->volume_mutex); clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); if (!ret) { if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) btrfs_info(fs_info, "device deleted: id %llu", vol_args->devid); else btrfs_info(fs_info, "device deleted: %s", vol_args->name); } out: kfree(vol_args); err_drop: mnt_drop_write_file(file); return ret; } static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_ioctl_vol_args *vol_args; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(file); if (ret) return ret; if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) { ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; goto out_drop_write; } vol_args = memdup_user(arg, sizeof(*vol_args)); 
if (IS_ERR(vol_args)) { ret = PTR_ERR(vol_args); goto out; } vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; mutex_lock(&fs_info->volume_mutex); ret = btrfs_rm_device(fs_info, vol_args->name, 0); mutex_unlock(&fs_info->volume_mutex); if (!ret) btrfs_info(fs_info, "disk deleted %s", vol_args->name); kfree(vol_args); out: clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); out_drop_write: mnt_drop_write_file(file); return ret; } static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_fs_info_args *fi_args; struct btrfs_device *device; struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; int ret = 0; fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL); if (!fi_args) return -ENOMEM; mutex_lock(&fs_devices->device_list_mutex); fi_args->num_devices = fs_devices->num_devices; memcpy(&fi_args->fsid, fs_info->fsid, sizeof(fi_args->fsid)); list_for_each_entry(device, &fs_devices->devices, dev_list) { if (device->devid > fi_args->max_id) fi_args->max_id = device->devid; } mutex_unlock(&fs_devices->device_list_mutex); fi_args->nodesize = fs_info->super_copy->nodesize; fi_args->sectorsize = fs_info->super_copy->sectorsize; fi_args->clone_alignment = fs_info->super_copy->sectorsize; if (copy_to_user(arg, fi_args, sizeof(*fi_args))) ret = -EFAULT; kfree(fi_args); return ret; } static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_dev_info_args *di_args; struct btrfs_device *dev; struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; int ret = 0; char *s_uuid = NULL; di_args = memdup_user(arg, sizeof(*di_args)); if (IS_ERR(di_args)) return PTR_ERR(di_args); if (!btrfs_is_empty_uuid(di_args->uuid)) s_uuid = di_args->uuid; mutex_lock(&fs_devices->device_list_mutex); dev = btrfs_find_device(fs_info, di_args->devid, s_uuid, NULL); if (!dev) { ret = -ENODEV; goto out; } di_args->devid = dev->devid; di_args->bytes_used = btrfs_device_get_bytes_used(dev); di_args->total_bytes = 
btrfs_device_get_total_bytes(dev); memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); if (dev->name) { struct rcu_string *name; rcu_read_lock(); name = rcu_dereference(dev->name); strncpy(di_args->path, name->str, sizeof(di_args->path)); rcu_read_unlock(); di_args->path[sizeof(di_args->path) - 1] = 0; } else { di_args->path[0] = '\0'; } out: mutex_unlock(&fs_devices->device_list_mutex); if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args))) ret = -EFAULT; kfree(di_args); return ret; } static struct page *extent_same_get_page(struct inode *inode, pgoff_t index) { struct page *page; page = grab_cache_page(inode->i_mapping, index); if (!page) return ERR_PTR(-ENOMEM); if (!PageUptodate(page)) { int ret; ret = btrfs_readpage(NULL, page); if (ret) return ERR_PTR(ret); lock_page(page); if (!PageUptodate(page)) { unlock_page(page); put_page(page); return ERR_PTR(-EIO); } if (page->mapping != inode->i_mapping) { unlock_page(page); put_page(page); return ERR_PTR(-EAGAIN); } } return page; } static int gather_extent_pages(struct inode *inode, struct page **pages, int num_pages, u64 off) { int i; pgoff_t index = off >> PAGE_SHIFT; for (i = 0; i < num_pages; i++) { again: pages[i] = extent_same_get_page(inode, index + i); if (IS_ERR(pages[i])) { int err = PTR_ERR(pages[i]); if (err == -EAGAIN) goto again; pages[i] = NULL; return err; } } return 0; } static int lock_extent_range(struct inode *inode, u64 off, u64 len, bool retry_range_locking) { /* * Do any pending delalloc/csum calculations on inode, one way or * another, and lock file content. 
* The locking order is: * * 1) pages * 2) range in the inode's io tree */ while (1) { struct btrfs_ordered_extent *ordered; lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1); ordered = btrfs_lookup_first_ordered_extent(inode, off + len - 1); if ((!ordered || ordered->file_offset + ordered->len <= off || ordered->file_offset >= off + len) && !test_range_bit(&BTRFS_I(inode)->io_tree, off, off + len - 1, EXTENT_DELALLOC, 0, NULL)) { if (ordered) btrfs_put_ordered_extent(ordered); break; } unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1); if (ordered) btrfs_put_ordered_extent(ordered); if (!retry_range_locking) return -EAGAIN; btrfs_wait_ordered_range(inode, off, len); } return 0; } static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2) { inode_unlock(inode1); inode_unlock(inode2); } static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2) { if (inode1 < inode2) swap(inode1, inode2); inode_lock_nested(inode1, I_MUTEX_PARENT); inode_lock_nested(inode2, I_MUTEX_CHILD); } static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1, struct inode *inode2, u64 loff2, u64 len) { unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1); unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1); } static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1, struct inode *inode2, u64 loff2, u64 len, bool retry_range_locking) { int ret; if (inode1 < inode2) { swap(inode1, inode2); swap(loff1, loff2); } ret = lock_extent_range(inode1, loff1, len, retry_range_locking); if (ret) return ret; ret = lock_extent_range(inode2, loff2, len, retry_range_locking); if (ret) unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1); return ret; } struct cmp_pages { int num_pages; struct page **src_pages; struct page **dst_pages; }; static void btrfs_cmp_data_free(struct cmp_pages *cmp) { int i; struct page *pg; for (i = 0; i < cmp->num_pages; i++) { pg = cmp->src_pages[i]; if (pg) { 
unlock_page(pg);
			put_page(pg);
		}
		pg = cmp->dst_pages[i];
		if (pg) {
			unlock_page(pg);
			put_page(pg);
		}
	}
	kfree(cmp->src_pages);
	kfree(cmp->dst_pages);
}

/*
 * Pin all pages of both dedupe ranges, filling @cmp.  Pages must be
 * gathered (and locked) before the extent ranges are locked in the io
 * tree.  On error everything gathered so far is released again.
 */
static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
				  struct inode *dst, u64 dst_loff,
				  u64 len, struct cmp_pages *cmp)
{
	int ret;
	int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
	struct page **src_pgarr, **dst_pgarr;

	/*
	 * We must gather up all the pages before we initiate our
	 * extent locking. We use an array for the page pointers. Size
	 * of the array is bounded by len, which is in turn bounded by
	 * BTRFS_MAX_DEDUPE_LEN.
	 */
	src_pgarr = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
	dst_pgarr = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!src_pgarr || !dst_pgarr) {
		/* kfree(NULL) is a no-op, so this handles either failing. */
		kfree(src_pgarr);
		kfree(dst_pgarr);
		return -ENOMEM;
	}
	cmp->num_pages = num_pages;
	cmp->src_pages = src_pgarr;
	cmp->dst_pages = dst_pgarr;

	/*
	 * If deduping ranges in the same inode, locking rules make it mandatory
	 * to always lock pages in ascending order to avoid deadlocks with
	 * concurrent tasks (such as starting writeback/delalloc).
*/ if (src == dst && dst_loff < loff) { swap(src_pgarr, dst_pgarr); swap(loff, dst_loff); } ret = gather_extent_pages(src, src_pgarr, cmp->num_pages, loff); if (ret) goto out; ret = gather_extent_pages(dst, dst_pgarr, cmp->num_pages, dst_loff); out: if (ret) btrfs_cmp_data_free(cmp); return 0; } static int btrfs_cmp_data(u64 len, struct cmp_pages *cmp) { int ret = 0; int i; struct page *src_page, *dst_page; unsigned int cmp_len = PAGE_SIZE; void *addr, *dst_addr; i = 0; while (len) { if (len < PAGE_SIZE) cmp_len = len; BUG_ON(i >= cmp->num_pages); src_page = cmp->src_pages[i]; dst_page = cmp->dst_pages[i]; ASSERT(PageLocked(src_page)); ASSERT(PageLocked(dst_page)); addr = kmap_atomic(src_page); dst_addr = kmap_atomic(dst_page); flush_dcache_page(src_page); flush_dcache_page(dst_page); if (memcmp(addr, dst_addr, cmp_len)) ret = -EBADE; kunmap_atomic(addr); kunmap_atomic(dst_addr); if (ret) break; len -= cmp_len; i++; } return ret; } static int extent_same_check_offsets(struct inode *inode, u64 off, u64 *plen, u64 olen) { u64 len = *plen; u64 bs = BTRFS_I(inode)->root->fs_info->sb->s_blocksize; if (off + olen > inode->i_size || off + olen < off) return -EINVAL; /* if we extend to eof, continue to block boundary */ if (off + len == inode->i_size) *plen = len = ALIGN(inode->i_size, bs) - off; /* Check that we are block aligned - btrfs_clone() requires this */ if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs)) return -EINVAL; return 0; } static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, struct inode *dst, u64 dst_loff) { int ret; u64 len = olen; struct cmp_pages cmp; bool same_inode = (src == dst); u64 same_lock_start = 0; u64 same_lock_len = 0; if (len == 0) return 0; if (same_inode) inode_lock(src); else btrfs_double_inode_lock(src, dst); ret = extent_same_check_offsets(src, loff, &len, olen); if (ret) goto out_unlock; ret = extent_same_check_offsets(dst, dst_loff, &len, olen); if (ret) goto out_unlock; if (same_inode) { /* * Single inode case 
wants the same checks, except we
		 * don't want our length pushed out past i_size as
		 * comparing that data range makes no sense.
		 *
		 * extent_same_check_offsets() will do this for an
		 * unaligned length at i_size, so catch it here and
		 * reject the request.
		 *
		 * This effectively means we require aligned extents
		 * for the single-inode case, whereas the other cases
		 * allow an unaligned length so long as it ends at
		 * i_size.
		 */
		if (len != olen) {
			ret = -EINVAL;
			goto out_unlock;
		}

		/* Check for overlapping ranges */
		if (dst_loff + len > loff && dst_loff < loff + len) {
			ret = -EINVAL;
			goto out_unlock;
		}

		/* One io-tree lock span covering both sub-ranges. */
		same_lock_start = min_t(u64, loff, dst_loff);
		same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
	}

	/* don't make the dst file partly checksummed */
	if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
	    (BTRFS_I(dst)->flags & BTRFS_INODE_NODATASUM)) {
		ret = -EINVAL;
		goto out_unlock;
	}

again:
	/* Pages first, then the io-tree ranges (see lock_extent_range). */
	ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
	if (ret)
		goto out_unlock;

	if (same_inode)
		ret = lock_extent_range(src, same_lock_start, same_lock_len,
					false);
	else
		ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
					       false);
	/*
	 * If one of the inodes has dirty pages in the respective range or
	 * ordered extents, we need to flush delalloc and wait for all ordered
	 * extents in the range. We must unlock the pages and the ranges in the
	 * io trees to avoid deadlocks when flushing delalloc (requires locking
	 * pages) and when waiting for ordered extents to complete (they require
	 * range locking).
	 */
	if (ret == -EAGAIN) {
		/*
		 * Ranges in the io trees already unlocked. Now unlock all
		 * pages before waiting for all IO to complete.
		 */
		btrfs_cmp_data_free(&cmp);
		if (same_inode) {
			btrfs_wait_ordered_range(src, same_lock_start,
						 same_lock_len);
		} else {
			btrfs_wait_ordered_range(src, loff, len);
			btrfs_wait_ordered_range(dst, dst_loff, len);
		}
		goto again;
	}
	ASSERT(ret == 0);
	if (WARN_ON(ret)) {
		/* ranges in the io trees already unlocked */
		btrfs_cmp_data_free(&cmp);
		return ret;
	}

	/* pass original length for comparison so we stay within i_size */
	ret = btrfs_cmp_data(olen, &cmp);
	if (ret == 0)
		/* Data matches: clone src extents over the dst range. */
		ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);

	if (same_inode)
		unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,
			      same_lock_start + same_lock_len - 1);
	else
		btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);

	btrfs_cmp_data_free(&cmp);
out_unlock:
	if (same_inode)
		inode_unlock(src);
	else
		btrfs_double_inode_unlock(src, dst);

	return ret;
}

/* Per-call cap on the dedupe length; keeps the page arrays bounded. */
#define BTRFS_MAX_DEDUPE_LEN	SZ_16M

/*
 * Entry point for the dedupe-range file operation.  Returns the number
 * of bytes deduped (the possibly clamped @olen) or a negative errno.
 */
ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen,
				struct file *dst_file, u64 dst_loff)
{
	struct inode *src = file_inode(src_file);
	struct inode *dst = file_inode(dst_file);
	u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
	ssize_t res;

	if (olen > BTRFS_MAX_DEDUPE_LEN)
		olen = BTRFS_MAX_DEDUPE_LEN;

	if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
		/*
		 * Btrfs does not support blocksize < page_size. As a
		 * result, btrfs_cmp_data() won't correctly handle
		 * this situation without an update.
		 */
		return -EINVAL;
	}

	res = btrfs_extent_same(src, loff, olen, dst, dst_loff);
	if (res)
		return res;
	return olen;
}

/*
 * Finish one clone step: bump iversion, update times (unless
 * @no_time_update), possibly grow i_size, then write the inode item and
 * end @trans.  Returns the transaction-commit result.
 */
static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     u64 endoff,
				     const u64 destoff,
				     const u64 olen,
				     int no_time_update)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	inode_inc_iversion(inode);
	if (!no_time_update)
		inode->i_mtime = inode->i_ctime = current_time(inode);
	/*
	 * We round up to the block size at eof when determining which
	 * extents to clone above, but shouldn't round up the file size.
	 */
	if (endoff > destoff + olen)
		endoff = destoff + olen;
	if (endoff > inode->i_size)
		btrfs_i_size_write(BTRFS_I(inode), endoff);

	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}
	ret = btrfs_end_transaction(trans);
out:
	return ret;
}

/*
 * Refresh the in-memory extent map for a range touched by clone.  With
 * @path set, the map is built from the file extent item at the path
 * position; otherwise a hole map for [hole_offset, hole_offset +
 * hole_len) is inserted.  On any failure, fall back to forcing a full
 * fsync of the inode instead of erroring out.
 */
static void clone_update_extent_map(struct btrfs_inode *inode,
				    const struct btrfs_trans_handle *trans,
				    const struct btrfs_path *path,
				    const u64 hole_offset,
				    const u64 hole_len)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	int ret;

	em = alloc_extent_map();
	if (!em) {
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
		return;
	}

	if (path) {
		struct btrfs_file_extent_item *fi;

		fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_extent_item_to_extent_map(inode, path, fi, false, em);
		em->generation = -1;
		if (btrfs_file_extent_type(path->nodes[0], fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&inode->runtime_flags);
	} else {
		em->start = hole_offset;
		em->len = hole_len;
		em->ram_bytes = em->len;
		em->orig_start = hole_offset;
		em->block_start = EXTENT_MAP_HOLE;
		em->block_len = 0;
		em->orig_block_len = 0;
		em->compress_type = BTRFS_COMPRESS_NONE;
		em->generation = trans->transid;
	}

	while (1) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em, 1);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
			free_extent_map(em);
			break;
		}
		/* A stale mapping overlaps: evict it and retry the insert. */
		btrfs_drop_extent_cache(inode, em->start,
					em->start + em->len - 1, 0);
	}

	if (ret)
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
}

/*
 * Make sure we do not end up inserting an inline extent into a file that has
 * already other (non-inline) extents. If a file has an inline extent it can
 * not have any other extents and the (single) inline extent must start at the
 * file offset 0.
Failing to respect these rules will lead to file corruption,
 * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm,
 * etc.
 *
 * We can have extents that have been already written to disk or we can have
 * dirty ranges still in delalloc, in which case the extent maps and items are
 * created only when we run delalloc, and the delalloc ranges might fall outside
 * the range we are currently locking in the inode's io tree. So we check the
 * inode's i_size because of that (i_size updates are done while holding the
 * i_mutex, which we are holding here).
 * We also check to see if the inode has a size not greater than "datal" but has
 * extents beyond it, due to a fallocate with FALLOC_FL_KEEP_SIZE (and we are
 * protected against such concurrent fallocate calls by the i_mutex).
 *
 * If the file has no extents but a size greater than datal, do not allow the
 * copy because we would need to turn the inline extent into a non-inline one
 * (even with NO_HOLES enabled). If we find our destination inode only has one
 * inline extent, just overwrite it with the source inline extent if its size
 * is less than the source extent's size, or we could copy the source inline
 * extent's data into the destination inode's inline extent if the latter is
 * greater than the former.
 */
static int clone_copy_inline_extent(struct inode *dst,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct btrfs_key *new_key,
				    const u64 drop_start,
				    const u64 datal,
				    const u64 skip,
				    const u64 size,
				    char *inline_data)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
	struct btrfs_root *root = BTRFS_I(dst)->root;
	const u64 aligned_end = ALIGN(new_key->offset + datal,
				      fs_info->sectorsize);
	int ret;
	struct btrfs_key key;

	/* Inline extents are only ever valid at file offset 0. */
	if (new_key->offset > 0)
		return -EOPNOTSUPP;

	key.objectid = btrfs_ino(BTRFS_I(dst));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				goto copy_inline_extent;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
		    key.type == BTRFS_EXTENT_DATA_KEY) {
			/* dst has extent data, but none at offset 0: bail. */
			ASSERT(key.offset > 0);
			return -EOPNOTSUPP;
		}
	} else if (i_size_read(dst) <= datal) {
		struct btrfs_file_extent_item *ei;
		u64 ext_len;

		/*
		 * If the file size is <= datal, make sure there are no other
		 * extents following (can happen due to a fallocate call with
		 * the flag FALLOC_FL_KEEP_SIZE).
		 */
		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		/*
		 * If it's an inline extent, it can not have other extents
		 * following it.
		 */
		if (btrfs_file_extent_type(path->nodes[0], ei) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto copy_inline_extent;

		ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
		/* Extent at offset 0, so its length equals its end offset. */
		if (ext_len > aligned_end)
			return -EOPNOTSUPP;

		ret = btrfs_next_item(root, path);
		if (ret < 0) {
			return ret;
		} else if (ret == 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			/* A second extent item follows: cannot inline here. */
			if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				return -EOPNOTSUPP;
		}
	}

copy_inline_extent:
	/*
	 * We have no extent items, or we have an extent at offset 0 which may
	 * or may not be inlined. All these cases are dealt the same way.
	 */
	if (i_size_read(dst) > datal) {
		/*
		 * If the destination inode has an inline extent...
		 * This would require copying the data from the source inline
		 * extent into the beginning of the destination's inline extent.
		 * But this is really complex, both extents can be compressed
		 * or just one of them, which would require decompressing and
		 * re-compressing data (which could increase the new compressed
		 * size, not allowing the compressed data to fit anymore in an
		 * inline extent).
		 * So just don't support this case for now (it should be rare,
		 * we are not really saving space when cloning inline extents).
		 */
		return -EOPNOTSUPP;
	}

	btrfs_release_path(path);
	/* Drop whatever occupied the destination range, then insert. */
	ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
	if (ret)
		return ret;
	ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
	if (ret)
		return ret;

	if (skip) {
		/* Shift the inline payload left to drop the skipped prefix. */
		const u32 start = btrfs_file_extent_calc_inline_size(0);

		memmove(inline_data + start, inline_data + start + skip,
			datal);
	}

	write_extent_buffer(path->nodes[0], inline_data,
			    btrfs_item_ptr_offset(path->nodes[0],
						  path->slots[0]),
			    size);
	inode_add_bytes(dst, datal);

	return 0;
}

/**
 * btrfs_clone() - clone a range from inode file to another
 *
 * @src: Inode to clone from
 * @inode: Inode to clone to
 * @off: Offset within source to start clone from
 * @olen: Original length, passed by user, of range to clone
 * @olen_aligned: Block-aligned value of olen
 * @destoff: Offset within @inode to start clone
 * @no_time_update: Whether to update mtime/ctime on the target inode
 */
static int btrfs_clone(struct inode *src, struct inode *inode,
		       const u64 off, const u64 olen, const u64 olen_aligned,
		       const u64 destoff, int no_time_update)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *leaf;
	struct btrfs_trans_handle *trans;
	char *buf = NULL;
	struct btrfs_key key;
	u32 nritems;
	int slot;
	int ret;
	const u64 len = olen_aligned;
	u64 last_dest_end = destoff;

	ret = -ENOMEM;
	/* Scratch buffer large enough for any item copied from a leaf. */
	buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
	if (!buf)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		kvfree(buf);
		return ret;
	}

	path->reada = READA_FORWARD;
	/* clone data */
	key.objectid = btrfs_ino(BTRFS_I(src));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = off;

	while (1) {
		u64 next_key_min_offset = key.offset + 1;

		/*
		 * note the key will change type as we walk through the
		 * tree.
*/ path->leave_spinning = 1; ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path, 0, 0); if (ret < 0) goto out; /* * First search, if no extent item that starts at offset off was * found but the previous item is an extent item, it's possible * it might overlap our target range, therefore process it. */ if (key.offset == off && ret > 0 && path->slots[0] > 0) { btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1); if (key.type == BTRFS_EXTENT_DATA_KEY) path->slots[0]--; } nritems = btrfs_header_nritems(path->nodes[0]); process_slot: if (path->slots[0] >= nritems) { ret = btrfs_next_leaf(BTRFS_I(src)->root, path); if (ret < 0) goto out; if (ret > 0) break; nritems = btrfs_header_nritems(path->nodes[0]); } leaf = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(leaf, &key, slot); if (key.type > BTRFS_EXTENT_DATA_KEY || key.objectid != btrfs_ino(BTRFS_I(src))) break; if (key.type == BTRFS_EXTENT_DATA_KEY) { struct btrfs_file_extent_item *extent; int type; u32 size; struct btrfs_key new_key; u64 disko = 0, diskl = 0; u64 datao = 0, datal = 0; u8 comp; u64 drop_start; extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); comp = btrfs_file_extent_compression(leaf, extent); type = btrfs_file_extent_type(leaf, extent); if (type == BTRFS_FILE_EXTENT_REG || type == BTRFS_FILE_EXTENT_PREALLOC) { disko = btrfs_file_extent_disk_bytenr(leaf, extent); diskl = btrfs_file_extent_disk_num_bytes(leaf, extent); datao = btrfs_file_extent_offset(leaf, extent); datal = btrfs_file_extent_num_bytes(leaf, extent); } else if (type == BTRFS_FILE_EXTENT_INLINE) { /* take upper bound, may be compressed */ datal = btrfs_file_extent_ram_bytes(leaf, extent); } /* * The first search might have left us at an extent * item that ends before our target range's start, can * happen if we have holes and NO_HOLES feature enabled. 
*/ if (key.offset + datal <= off) { path->slots[0]++; goto process_slot; } else if (key.offset >= off + len) { break; } next_key_min_offset = key.offset + datal; size = btrfs_item_size_nr(leaf, slot); read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot), size); btrfs_release_path(path); path->leave_spinning = 0; memcpy(&new_key, &key, sizeof(new_key)); new_key.objectid = btrfs_ino(BTRFS_I(inode)); if (off <= key.offset) new_key.offset = key.offset + destoff - off; else new_key.offset = destoff; /* * Deal with a hole that doesn't have an extent item * that represents it (NO_HOLES feature enabled). * This hole is either in the middle of the cloning * range or at the beginning (fully overlaps it or * partially overlaps it). */ if (new_key.offset != last_dest_end) drop_start = last_dest_end; else drop_start = new_key.offset; /* * 1 - adjusting old extent (we may have to split it) * 1 - add new extent * 1 - inode update */ trans = btrfs_start_transaction(root, 3); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out; } if (type == BTRFS_FILE_EXTENT_REG || type == BTRFS_FILE_EXTENT_PREALLOC) { /* * a | --- range to clone ---| b * | ------------- extent ------------- | */ /* subtract range b */ if (key.offset + datal > off + len) datal = off + len - key.offset; /* subtract range a */ if (off > key.offset) { datao += off - key.offset; datal -= off - key.offset; } ret = btrfs_drop_extents(trans, root, inode, drop_start, new_key.offset + datal, 1); if (ret) { if (ret != -EOPNOTSUPP) btrfs_abort_transaction(trans, ret); btrfs_end_transaction(trans); goto out; } ret = btrfs_insert_empty_item(trans, root, path, &new_key, size); if (ret) { btrfs_abort_transaction(trans, ret); btrfs_end_transaction(trans); goto out; } leaf = path->nodes[0]; slot = path->slots[0]; write_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot), size); extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); /* disko == 0 means it's a hole */ if (!disko) datao = 0; 
btrfs_set_file_extent_offset(leaf, extent, datao); btrfs_set_file_extent_num_bytes(leaf, extent, datal); if (disko) { inode_add_bytes(inode, datal); ret = btrfs_inc_extent_ref(trans, fs_info, disko, diskl, 0, root->root_key.objectid, btrfs_ino(BTRFS_I(inode)), new_key.offset - datao); if (ret) { btrfs_abort_transaction(trans, ret); btrfs_end_transaction(trans); goto out; } } } else if (type == BTRFS_FILE_EXTENT_INLINE) { u64 skip = 0; u64 trim = 0; if (off > key.offset) { skip = off - key.offset; new_key.offset += skip; } if (key.offset + datal > off + len) trim = key.offset + datal - (off + len); if (comp && (skip || trim)) { ret = -EINVAL; btrfs_end_transaction(trans); goto out; } size -= skip + trim; datal -= skip + trim; ret = clone_copy_inline_extent(inode, trans, path, &new_key, drop_start, datal, skip, size, buf); if (ret) { if (ret != -EOPNOTSUPP) btrfs_abort_transaction(trans, ret); btrfs_end_transaction(trans); goto out; } leaf = path->nodes[0]; slot = path->slots[0]; } /* If we have an implicit hole (NO_HOLES feature). */ if (drop_start < new_key.offset) clone_update_extent_map(BTRFS_I(inode), trans, NULL, drop_start, new_key.offset - drop_start); clone_update_extent_map(BTRFS_I(inode), trans, path, 0, 0); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); last_dest_end = ALIGN(new_key.offset + datal, fs_info->sectorsize); ret = clone_finish_inode_update(trans, inode, last_dest_end, destoff, olen, no_time_update); if (ret) goto out; if (new_key.offset + datal >= destoff + len) break; } btrfs_release_path(path); key.offset = next_key_min_offset; if (fatal_signal_pending(current)) { ret = -EINTR; goto out; } } ret = 0; if (last_dest_end < destoff + len) { /* * We have an implicit hole (NO_HOLES feature is enabled) that * fully or partially overlaps our cloning range at its end. 
*/ btrfs_release_path(path); /* * 1 - remove extent(s) * 1 - inode update */ trans = btrfs_start_transaction(root, 2); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out; } ret = btrfs_drop_extents(trans, root, inode, last_dest_end, destoff + len, 1); if (ret) { if (ret != -EOPNOTSUPP) btrfs_abort_transaction(trans, ret); btrfs_end_transaction(trans); goto out; } clone_update_extent_map(BTRFS_I(inode), trans, NULL, last_dest_end, destoff + len - last_dest_end); ret = clone_finish_inode_update(trans, inode, destoff + len, destoff, olen, no_time_update); } out: btrfs_free_path(path); kvfree(buf); return ret; } static noinline int btrfs_clone_files(struct file *file, struct file *file_src, u64 off, u64 olen, u64 destoff) { struct inode *inode = file_inode(file); struct inode *src = file_inode(file_src); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; int ret; u64 len = olen; u64 bs = fs_info->sb->s_blocksize; int same_inode = src == inode; /* * TODO: * - split compressed inline extents. annoying: we need to * decompress into destination's address_space (the file offset * may change, so source mapping won't do), then recompress (or * otherwise reinsert) a subrange. * * - split destination inode's inline extents. The inline extents can * be either compressed or non-compressed. 
*/ if (btrfs_root_readonly(root)) return -EROFS; if (file_src->f_path.mnt != file->f_path.mnt || src->i_sb != inode->i_sb) return -EXDEV; /* don't make the dst file partly checksummed */ if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) != (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) return -EINVAL; if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode)) return -EISDIR; if (!same_inode) { btrfs_double_inode_lock(src, inode); } else { inode_lock(src); } /* determine range to clone */ ret = -EINVAL; if (off + len > src->i_size || off + len < off) goto out_unlock; if (len == 0) olen = len = src->i_size - off; /* if we extend to eof, continue to block boundary */ if (off + len == src->i_size) len = ALIGN(src->i_size, bs) - off; if (len == 0) { ret = 0; goto out_unlock; } /* verify the end result is block aligned */ if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) || !IS_ALIGNED(destoff, bs)) goto out_unlock; /* verify if ranges are overlapped within the same file */ if (same_inode) { if (destoff + len > off && destoff < off + len) goto out_unlock; } if (destoff > inode->i_size) { ret = btrfs_cont_expand(inode, inode->i_size, destoff); if (ret) goto out_unlock; } /* * Lock the target range too. Right after we replace the file extent * items in the fs tree (which now point to the cloned data), we might * have a worker replace them with extent items relative to a write * operation that was issued before this clone operation (i.e. confront * with inode.c:btrfs_finish_ordered_io). 
*/ if (same_inode) { u64 lock_start = min_t(u64, off, destoff); u64 lock_len = max_t(u64, off, destoff) + len - lock_start; ret = lock_extent_range(src, lock_start, lock_len, true); } else { ret = btrfs_double_extent_lock(src, off, inode, destoff, len, true); } ASSERT(ret == 0); if (WARN_ON(ret)) { /* ranges in the io trees already unlocked */ goto out_unlock; } ret = btrfs_clone(src, inode, off, olen, len, destoff, 0); if (same_inode) { u64 lock_start = min_t(u64, off, destoff); u64 lock_end = max_t(u64, off, destoff) + len - 1; unlock_extent(&BTRFS_I(src)->io_tree, lock_start, lock_end); } else { btrfs_double_extent_unlock(src, off, inode, destoff, len); } /* * Truncate page cache pages so that future reads will see the cloned * data immediately and not the previous data. */ truncate_inode_pages_range(&inode->i_data, round_down(destoff, PAGE_SIZE), round_up(destoff + len, PAGE_SIZE) - 1); out_unlock: if (!same_inode) btrfs_double_inode_unlock(src, inode); else inode_unlock(src); return ret; } int btrfs_clone_file_range(struct file *src_file, loff_t off, struct file *dst_file, loff_t destoff, u64 len) { return btrfs_clone_files(dst_file, src_file, off, len, destoff); } /* * there are many ways the trans_start and trans_end ioctls can lead * to deadlocks. They should only be used by applications that * basically own the machine, and have a very in depth understanding * of all the possible deadlocks and enospc problems. */ static long btrfs_ioctl_trans_start(struct file *file) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; struct btrfs_file_private *private; int ret; static bool warned = false; ret = -EPERM; if (!capable(CAP_SYS_ADMIN)) goto out; if (!warned) { btrfs_warn(fs_info, "Userspace transaction mechanism is considered " "deprecated and slated to be removed in 4.17. 
" "If you have a valid use case please " "speak up on the mailing list"); WARN_ON(1); warned = true; } ret = -EINPROGRESS; private = file->private_data; if (private && private->trans) goto out; if (!private) { private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL); if (!private) return -ENOMEM; file->private_data = private; } ret = -EROFS; if (btrfs_root_readonly(root)) goto out; ret = mnt_want_write_file(file); if (ret) goto out; atomic_inc(&fs_info->open_ioctl_trans); ret = -ENOMEM; trans = btrfs_start_ioctl_transaction(root); if (IS_ERR(trans)) goto out_drop; private->trans = trans; return 0; out_drop: atomic_dec(&fs_info->open_ioctl_trans); mnt_drop_write_file(file); out: return ret; } static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *new_root; struct btrfs_dir_item *di; struct btrfs_trans_handle *trans; struct btrfs_path *path; struct btrfs_key location; struct btrfs_disk_key disk_key; u64 objectid = 0; u64 dir_id; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(file); if (ret) return ret; if (copy_from_user(&objectid, argp, sizeof(objectid))) { ret = -EFAULT; goto out; } if (!objectid) objectid = BTRFS_FS_TREE_OBJECTID; location.objectid = objectid; location.type = BTRFS_ROOT_ITEM_KEY; location.offset = (u64)-1; new_root = btrfs_read_fs_root_no_name(fs_info, &location); if (IS_ERR(new_root)) { ret = PTR_ERR(new_root); goto out; } path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto out; } path->leave_spinning = 1; trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) { btrfs_free_path(path); ret = PTR_ERR(trans); goto out; } dir_id = btrfs_super_root_dir(fs_info->super_copy); di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path, dir_id, "default", 7, 1); if (IS_ERR_OR_NULL(di)) { btrfs_free_path(path); 
btrfs_end_transaction(trans); btrfs_err(fs_info, "Umm, you don't have the default diritem, this isn't going to work"); ret = -ENOENT; goto out; } btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key); btrfs_set_dir_item_key(path->nodes[0], di, &disk_key); btrfs_mark_buffer_dirty(path->nodes[0]); btrfs_free_path(path); btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL); btrfs_end_transaction(trans); out: mnt_drop_write_file(file); return ret; } void btrfs_get_block_group_info(struct list_head *groups_list, struct btrfs_ioctl_space_info *space) { struct btrfs_block_group_cache *block_group; space->total_bytes = 0; space->used_bytes = 0; space->flags = 0; list_for_each_entry(block_group, groups_list, list) { space->flags = block_group->flags; space->total_bytes += block_group->key.offset; space->used_bytes += btrfs_block_group_used(&block_group->item); } } static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_space_args space_args; struct btrfs_ioctl_space_info space; struct btrfs_ioctl_space_info *dest; struct btrfs_ioctl_space_info *dest_orig; struct btrfs_ioctl_space_info __user *user_dest; struct btrfs_space_info *info; u64 types[] = {BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_SYSTEM, BTRFS_BLOCK_GROUP_METADATA, BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA}; int num_types = 4; int alloc_size; int ret = 0; u64 slot_count = 0; int i, c; if (copy_from_user(&space_args, (struct btrfs_ioctl_space_args __user *)arg, sizeof(space_args))) return -EFAULT; for (i = 0; i < num_types; i++) { struct btrfs_space_info *tmp; info = NULL; rcu_read_lock(); list_for_each_entry_rcu(tmp, &fs_info->space_info, list) { if (tmp->flags == types[i]) { info = tmp; break; } } rcu_read_unlock(); if (!info) continue; down_read(&info->groups_sem); for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) { if (!list_empty(&info->block_groups[c])) slot_count++; } up_read(&info->groups_sem); } /* * Global block reserve, exported as a space_info */ 
slot_count++; /* space_slots == 0 means they are asking for a count */ if (space_args.space_slots == 0) { space_args.total_spaces = slot_count; goto out; } slot_count = min_t(u64, space_args.space_slots, slot_count); alloc_size = sizeof(*dest) * slot_count; /* we generally have at most 6 or so space infos, one for each raid * level. So, a whole page should be more than enough for everyone */ if (alloc_size > PAGE_SIZE) return -ENOMEM; space_args.total_spaces = 0; dest = kmalloc(alloc_size, GFP_KERNEL); if (!dest) return -ENOMEM; dest_orig = dest; /* now we have a buffer to copy into */ for (i = 0; i < num_types; i++) { struct btrfs_space_info *tmp; if (!slot_count) break; info = NULL; rcu_read_lock(); list_for_each_entry_rcu(tmp, &fs_info->space_info, list) { if (tmp->flags == types[i]) { info = tmp; break; } } rcu_read_unlock(); if (!info) continue; down_read(&info->groups_sem); for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) { if (!list_empty(&info->block_groups[c])) { btrfs_get_block_group_info( &info->block_groups[c], &space); memcpy(dest, &space, sizeof(space)); dest++; space_args.total_spaces++; slot_count--; } if (!slot_count) break; } up_read(&info->groups_sem); } /* * Add global block reserve */ if (slot_count) { struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; spin_lock(&block_rsv->lock); space.total_bytes = block_rsv->size; space.used_bytes = block_rsv->size - block_rsv->reserved; spin_unlock(&block_rsv->lock); space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV; memcpy(dest, &space, sizeof(space)); space_args.total_spaces++; } user_dest = (struct btrfs_ioctl_space_info __user *) (arg + sizeof(struct btrfs_ioctl_space_args)); if (copy_to_user(user_dest, dest_orig, alloc_size)) ret = -EFAULT; kfree(dest_orig); out: if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args))) ret = -EFAULT; return ret; } /* * there are many ways the trans_start and trans_end ioctls can lead * to deadlocks. 
They should only be used by applications that
 * basically own the machine, and have a very in depth understanding
 * of all the possible deadlocks and enospc problems.
 */
long btrfs_ioctl_trans_end(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_private *private = file->private_data;

	if (!private || !private->trans)
		return -EINVAL;

	btrfs_end_transaction(private->trans);
	private->trans = NULL;

	atomic_dec(&root->fs_info->open_ioctl_trans);

	/* Drops the write access taken by btrfs_ioctl_trans_start(). */
	mnt_drop_write_file(file);
	return 0;
}

/*
 * Kick off an async commit of the current transaction and report the
 * transid being committed to user space (when @argp is non-NULL).  Does
 * not wait for the commit to finish; see btrfs_ioctl_wait_sync().
 */
static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
					    void __user *argp)
{
	struct btrfs_trans_handle *trans;
	u64 transid;
	int ret;

	trans = btrfs_attach_transaction_barrier(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT)
			return PTR_ERR(trans);

		/* No running transaction, don't bother */
		transid = root->fs_info->last_trans_committed;
		goto out;
	}
	transid = trans->transid;
	ret = btrfs_commit_transaction_async(trans, 0);
	if (ret) {
		btrfs_end_transaction(trans);
		return ret;
	}
out:
	if (argp)
		if (copy_to_user(argp, &transid, sizeof(transid)))
			return -EFAULT;
	return 0;
}

/*
 * Wait until the given transid is fully committed; transid 0 (or no
 * argument from user space) means the currently committing transaction.
 */
static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
					   void __user *argp)
{
	u64 transid;

	if (argp) {
		if (copy_from_user(&transid, argp, sizeof(transid)))
			return -EFAULT;
	} else {
		transid = 0;  /* current trans */
	}
	return btrfs_wait_for_commit(fs_info, transid);
}

/* Start a scrub of one device as described by the user's scrub args. */
static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
	struct btrfs_ioctl_scrub_args *sa;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	sa = memdup_user(arg, sizeof(*sa));
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	/* A read-write scrub may repair data, so take write access. */
	if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
		ret = mnt_want_write_file(file);
		if (ret)
			goto out;
	}

	ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end,
			      &sa->progress, sa->flags & BTRFS_SCRUB_READONLY,
			      0);

	/*
	 * NOTE(review): a failed copy_to_user() here overwrites the scrub
	 * result with -EFAULT, losing the original status.
	 */
	if (copy_to_user(arg, sa, sizeof(*sa)))
		ret = -EFAULT;
if (!(sa->flags & BTRFS_SCRUB_READONLY)) mnt_drop_write_file(file); out: kfree(sa); return ret; } static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; return btrfs_scrub_cancel(fs_info); } static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_scrub_args *sa; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; sa = memdup_user(arg, sizeof(*sa)); if (IS_ERR(sa)) return PTR_ERR(sa); ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress); if (copy_to_user(arg, sa, sizeof(*sa))) ret = -EFAULT; kfree(sa); return ret; } static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_get_dev_stats *sa; int ret; sa = memdup_user(arg, sizeof(*sa)); if (IS_ERR(sa)) return PTR_ERR(sa); if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) { kfree(sa); return -EPERM; } ret = btrfs_get_dev_stats(fs_info, sa); if (copy_to_user(arg, sa, sizeof(*sa))) ret = -EFAULT; kfree(sa); return ret; } static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_dev_replace_args *p; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; p = memdup_user(arg, sizeof(*p)); if (IS_ERR(p)) return PTR_ERR(p); switch (p->cmd) { case BTRFS_IOCTL_DEV_REPLACE_CMD_START: if (sb_rdonly(fs_info->sb)) { ret = -EROFS; goto out; } if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) { ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; } else { ret = btrfs_dev_replace_by_ioctl(fs_info, p); clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); } break; case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS: btrfs_dev_replace_status(fs_info, p); ret = 0; break; case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL: ret = btrfs_dev_replace_cancel(fs_info, p); break; default: ret = -EINVAL; break; } if (copy_to_user(arg, p, sizeof(*p))) ret = -EFAULT; out: kfree(p); return ret; } static long btrfs_ioctl_ino_to_path(struct btrfs_root 
*root, void __user *arg) { int ret = 0; int i; u64 rel_ptr; int size; struct btrfs_ioctl_ino_path_args *ipa = NULL; struct inode_fs_paths *ipath = NULL; struct btrfs_path *path; if (!capable(CAP_DAC_READ_SEARCH)) return -EPERM; path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto out; } ipa = memdup_user(arg, sizeof(*ipa)); if (IS_ERR(ipa)) { ret = PTR_ERR(ipa); ipa = NULL; goto out; } size = min_t(u32, ipa->size, 4096); ipath = init_ipath(size, root, path); if (IS_ERR(ipath)) { ret = PTR_ERR(ipath); ipath = NULL; goto out; } ret = paths_from_inode(ipa->inum, ipath); if (ret < 0) goto out; for (i = 0; i < ipath->fspath->elem_cnt; ++i) { rel_ptr = ipath->fspath->val[i] - (u64)(unsigned long)ipath->fspath->val; ipath->fspath->val[i] = rel_ptr; } ret = copy_to_user((void *)(unsigned long)ipa->fspath, (void *)(unsigned long)ipath->fspath, size); if (ret) { ret = -EFAULT; goto out; } out: btrfs_free_path(path); free_ipath(ipath); kfree(ipa); return ret; } static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx) { struct btrfs_data_container *inodes = ctx; const size_t c = 3 * sizeof(u64); if (inodes->bytes_left >= c) { inodes->bytes_left -= c; inodes->val[inodes->elem_cnt] = inum; inodes->val[inodes->elem_cnt + 1] = offset; inodes->val[inodes->elem_cnt + 2] = root; inodes->elem_cnt += 3; } else { inodes->bytes_missing += c - inodes->bytes_left; inodes->bytes_left = 0; inodes->elem_missed += 3; } return 0; } static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info, void __user *arg) { int ret = 0; int size; struct btrfs_ioctl_logical_ino_args *loi; struct btrfs_data_container *inodes = NULL; struct btrfs_path *path = NULL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; loi = memdup_user(arg, sizeof(*loi)); if (IS_ERR(loi)) return PTR_ERR(loi); path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto out; } size = min_t(u32, loi->size, SZ_64K); inodes = init_data_container(size); if (IS_ERR(inodes)) { ret = PTR_ERR(inodes); inodes = NULL; 
goto out; } ret = iterate_inodes_from_logical(loi->logical, fs_info, path, build_ino_list, inodes); if (ret == -EINVAL) ret = -ENOENT; if (ret < 0) goto out; ret = copy_to_user((void *)(unsigned long)loi->inodes, (void *)(unsigned long)inodes, size); if (ret) ret = -EFAULT; out: btrfs_free_path(path); kvfree(inodes); kfree(loi); return ret; } void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock, struct btrfs_ioctl_balance_args *bargs) { struct btrfs_balance_control *bctl = fs_info->balance_ctl; bargs->flags = bctl->flags; if (atomic_read(&fs_info->balance_running)) bargs->state |= BTRFS_BALANCE_STATE_RUNNING; if (atomic_read(&fs_info->balance_pause_req)) bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ; if (atomic_read(&fs_info->balance_cancel_req)) bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ; memcpy(&bargs->data, &bctl->data, sizeof(bargs->data)); memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta)); memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys)); if (lock) { spin_lock(&fs_info->balance_lock); memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat)); spin_unlock(&fs_info->balance_lock); } else { memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat)); } } static long btrfs_ioctl_balance(struct file *file, void __user *arg) { struct btrfs_root *root = BTRFS_I(file_inode(file))->root; struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_ioctl_balance_args *bargs; struct btrfs_balance_control *bctl; bool need_unlock; /* for mut. excl. ops lock */ int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(file); if (ret) return ret; again: if (!test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) { mutex_lock(&fs_info->volume_mutex); mutex_lock(&fs_info->balance_mutex); need_unlock = true; goto locked; } /* * mut. excl. ops lock is locked. 
Three possibilities: * (1) some other op is running * (2) balance is running * (3) balance is paused -- special case (think resume) */ mutex_lock(&fs_info->balance_mutex); if (fs_info->balance_ctl) { /* this is either (2) or (3) */ if (!atomic_read(&fs_info->balance_running)) { mutex_unlock(&fs_info->balance_mutex); if (!mutex_trylock(&fs_info->volume_mutex)) goto again; mutex_lock(&fs_info->balance_mutex); if (fs_info->balance_ctl && !atomic_read(&fs_info->balance_running)) { /* this is (3) */ need_unlock = false; goto locked; } mutex_unlock(&fs_info->balance_mutex); mutex_unlock(&fs_info->volume_mutex); goto again; } else { /* this is (2) */ mutex_unlock(&fs_info->balance_mutex); ret = -EINPROGRESS; goto out; } } else { /* this is (1) */ mutex_unlock(&fs_info->balance_mutex); ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; goto out; } locked: BUG_ON(!test_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)); if (arg) { bargs = memdup_user(arg, sizeof(*bargs)); if (IS_ERR(bargs)) { ret = PTR_ERR(bargs); goto out_unlock; } if (bargs->flags & BTRFS_BALANCE_RESUME) { if (!fs_info->balance_ctl) { ret = -ENOTCONN; goto out_bargs; } bctl = fs_info->balance_ctl; spin_lock(&fs_info->balance_lock); bctl->flags |= BTRFS_BALANCE_RESUME; spin_unlock(&fs_info->balance_lock); goto do_balance; } } else { bargs = NULL; } if (fs_info->balance_ctl) { ret = -EINPROGRESS; goto out_bargs; } bctl = kzalloc(sizeof(*bctl), GFP_KERNEL); if (!bctl) { ret = -ENOMEM; goto out_bargs; } bctl->fs_info = fs_info; if (arg) { memcpy(&bctl->data, &bargs->data, sizeof(bctl->data)); memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta)); memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys)); bctl->flags = bargs->flags; } else { /* balance everything - no filters */ bctl->flags |= BTRFS_BALANCE_TYPE_MASK; } if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) { ret = -EINVAL; goto out_bctl; } do_balance: /* * Ownership of bctl and filesystem flag BTRFS_FS_EXCL_OP * goes to to btrfs_balance. 
bctl is freed in __cancel_balance, * or, if restriper was paused all the way until unmount, in * free_fs_info. The flag is cleared in __cancel_balance. */ need_unlock = false; ret = btrfs_balance(bctl, bargs); bctl = NULL; if (arg) { if (copy_to_user(arg, bargs, sizeof(*bargs))) ret = -EFAULT; } out_bctl: kfree(bctl); out_bargs: kfree(bargs); out_unlock: mutex_unlock(&fs_info->balance_mutex); mutex_unlock(&fs_info->volume_mutex); if (need_unlock) clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags); out: mnt_drop_write_file(file); return ret; } static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; switch (cmd) { case BTRFS_BALANCE_CTL_PAUSE: return btrfs_pause_balance(fs_info); case BTRFS_BALANCE_CTL_CANCEL: return btrfs_cancel_balance(fs_info); } return -EINVAL; } static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_balance_args *bargs; int ret = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; mutex_lock(&fs_info->balance_mutex); if (!fs_info->balance_ctl) { ret = -ENOTCONN; goto out; } bargs = kzalloc(sizeof(*bargs), GFP_KERNEL); if (!bargs) { ret = -ENOMEM; goto out; } update_ioctl_balance_args(fs_info, 1, bargs); if (copy_to_user(arg, bargs, sizeof(*bargs))) ret = -EFAULT; kfree(bargs); out: mutex_unlock(&fs_info->balance_mutex); return ret; } static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_ioctl_quota_ctl_args *sa; struct btrfs_trans_handle *trans = NULL; int ret; int err; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(file); if (ret) return ret; sa = memdup_user(arg, sizeof(*sa)); if (IS_ERR(sa)) { ret = PTR_ERR(sa); goto drop_write; } down_write(&fs_info->subvol_sem); trans = btrfs_start_transaction(fs_info->tree_root, 2); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out; } switch 
(sa->cmd) { case BTRFS_QUOTA_CTL_ENABLE: ret = btrfs_quota_enable(trans, fs_info); break; case BTRFS_QUOTA_CTL_DISABLE: ret = btrfs_quota_disable(trans, fs_info); break; default: ret = -EINVAL; break; } err = btrfs_commit_transaction(trans); if (err && !ret) ret = err; out: kfree(sa); up_write(&fs_info->subvol_sem); drop_write: mnt_drop_write_file(file); return ret; } static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_ioctl_qgroup_assign_args *sa; struct btrfs_trans_handle *trans; int ret; int err; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(file); if (ret) return ret; sa = memdup_user(arg, sizeof(*sa)); if (IS_ERR(sa)) { ret = PTR_ERR(sa); goto drop_write; } trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out; } if (sa->assign) { ret = btrfs_add_qgroup_relation(trans, fs_info, sa->src, sa->dst); } else { ret = btrfs_del_qgroup_relation(trans, fs_info, sa->src, sa->dst); } /* update qgroup status and info */ err = btrfs_run_qgroups(trans, fs_info); if (err < 0) btrfs_handle_fs_error(fs_info, err, "failed to update qgroup status and info"); err = btrfs_end_transaction(trans); if (err && !ret) ret = err; out: kfree(sa); drop_write: mnt_drop_write_file(file); return ret; } static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_ioctl_qgroup_create_args *sa; struct btrfs_trans_handle *trans; int ret; int err; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(file); if (ret) return ret; sa = memdup_user(arg, sizeof(*sa)); if (IS_ERR(sa)) { ret = PTR_ERR(sa); goto drop_write; } if (!sa->qgroupid) { ret = -EINVAL; goto out; } trans = 
btrfs_join_transaction(root); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out; } if (sa->create) { ret = btrfs_create_qgroup(trans, fs_info, sa->qgroupid); } else { ret = btrfs_remove_qgroup(trans, fs_info, sa->qgroupid); } err = btrfs_end_transaction(trans); if (err && !ret) ret = err; out: kfree(sa); drop_write: mnt_drop_write_file(file); return ret; } static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_ioctl_qgroup_limit_args *sa; struct btrfs_trans_handle *trans; int ret; int err; u64 qgroupid; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(file); if (ret) return ret; sa = memdup_user(arg, sizeof(*sa)); if (IS_ERR(sa)) { ret = PTR_ERR(sa); goto drop_write; } trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out; } qgroupid = sa->qgroupid; if (!qgroupid) { /* take the current subvol as qgroup */ qgroupid = root->root_key.objectid; } ret = btrfs_limit_qgroup(trans, fs_info, qgroupid, &sa->lim); err = btrfs_end_transaction(trans); if (err && !ret) ret = err; out: kfree(sa); drop_write: mnt_drop_write_file(file); return ret; } static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_ioctl_quota_rescan_args *qsa; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(file); if (ret) return ret; qsa = memdup_user(arg, sizeof(*qsa)); if (IS_ERR(qsa)) { ret = PTR_ERR(qsa); goto drop_write; } if (qsa->flags) { ret = -EINVAL; goto out; } ret = btrfs_qgroup_rescan(fs_info); out: kfree(qsa); drop_write: mnt_drop_write_file(file); return ret; } static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info 
*fs_info = btrfs_sb(inode->i_sb); struct btrfs_ioctl_quota_rescan_args *qsa; int ret = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; qsa = kzalloc(sizeof(*qsa), GFP_KERNEL); if (!qsa) return -ENOMEM; if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { qsa->flags = 1; qsa->progress = fs_info->qgroup_rescan_progress.objectid; } if (copy_to_user(arg, qsa, sizeof(*qsa))) ret = -EFAULT; kfree(qsa); return ret; } static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); if (!capable(CAP_SYS_ADMIN)) return -EPERM; return btrfs_qgroup_wait_for_completion(fs_info, true); } static long _btrfs_ioctl_set_received_subvol(struct file *file, struct btrfs_ioctl_received_subvol_args *sa) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root_item *root_item = &root->root_item; struct btrfs_trans_handle *trans; struct timespec ct = current_time(inode); int ret = 0; int received_uuid_changed; if (!inode_owner_or_capable(inode)) return -EPERM; ret = mnt_want_write_file(file); if (ret < 0) return ret; down_write(&fs_info->subvol_sem); if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) { ret = -EINVAL; goto out; } if (btrfs_root_readonly(root)) { ret = -EROFS; goto out; } /* * 1 - root item * 2 - uuid items (received uuid + subvol uuid) */ trans = btrfs_start_transaction(root, 3); if (IS_ERR(trans)) { ret = PTR_ERR(trans); trans = NULL; goto out; } sa->rtransid = trans->transid; sa->rtime.sec = ct.tv_sec; sa->rtime.nsec = ct.tv_nsec; received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE); if (received_uuid_changed && !btrfs_is_empty_uuid(root_item->received_uuid)) btrfs_uuid_tree_rem(trans, fs_info, root_item->received_uuid, BTRFS_UUID_KEY_RECEIVED_SUBVOL, root->root_key.objectid); memcpy(root_item->received_uuid, 
sa->uuid, BTRFS_UUID_SIZE); btrfs_set_root_stransid(root_item, sa->stransid); btrfs_set_root_rtransid(root_item, sa->rtransid); btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec); btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec); btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec); btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec); ret = btrfs_update_root(trans, fs_info->tree_root, &root->root_key, &root->root_item); if (ret < 0) { btrfs_end_transaction(trans); goto out; } if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) { ret = btrfs_uuid_tree_add(trans, fs_info, sa->uuid, BTRFS_UUID_KEY_RECEIVED_SUBVOL, root->root_key.objectid); if (ret < 0 && ret != -EEXIST) { btrfs_abort_transaction(trans, ret); goto out; } } ret = btrfs_commit_transaction(trans); if (ret < 0) { btrfs_abort_transaction(trans, ret); goto out; } out: up_write(&fs_info->subvol_sem); mnt_drop_write_file(file); return ret; } #ifdef CONFIG_64BIT static long btrfs_ioctl_set_received_subvol_32(struct file *file, void __user *arg) { struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL; struct btrfs_ioctl_received_subvol_args *args64 = NULL; int ret = 0; args32 = memdup_user(arg, sizeof(*args32)); if (IS_ERR(args32)) return PTR_ERR(args32); args64 = kmalloc(sizeof(*args64), GFP_KERNEL); if (!args64) { ret = -ENOMEM; goto out; } memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE); args64->stransid = args32->stransid; args64->rtransid = args32->rtransid; args64->stime.sec = args32->stime.sec; args64->stime.nsec = args32->stime.nsec; args64->rtime.sec = args32->rtime.sec; args64->rtime.nsec = args32->rtime.nsec; args64->flags = args32->flags; ret = _btrfs_ioctl_set_received_subvol(file, args64); if (ret) goto out; memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE); args32->stransid = args64->stransid; args32->rtransid = args64->rtransid; args32->stime.sec = args64->stime.sec; args32->stime.nsec = args64->stime.nsec; args32->rtime.sec = 
args64->rtime.sec; args32->rtime.nsec = args64->rtime.nsec; args32->flags = args64->flags; ret = copy_to_user(arg, args32, sizeof(*args32)); if (ret) ret = -EFAULT; out: kfree(args32); kfree(args64); return ret; } #endif static long btrfs_ioctl_set_received_subvol(struct file *file, void __user *arg) { struct btrfs_ioctl_received_subvol_args *sa = NULL; int ret = 0; sa = memdup_user(arg, sizeof(*sa)); if (IS_ERR(sa)) return PTR_ERR(sa); ret = _btrfs_ioctl_set_received_subvol(file, sa); if (ret) goto out; ret = copy_to_user(arg, sa, sizeof(*sa)); if (ret) ret = -EFAULT; out: kfree(sa); return ret; } static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); size_t len; int ret; char label[BTRFS_LABEL_SIZE]; spin_lock(&fs_info->super_lock); memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE); spin_unlock(&fs_info->super_lock); len = strnlen(label, BTRFS_LABEL_SIZE); if (len == BTRFS_LABEL_SIZE) { btrfs_warn(fs_info, "label is too long, return the first %zu bytes", --len); } ret = copy_to_user(arg, label, len); return ret ? 
-EFAULT : 0; } static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_super_block *super_block = fs_info->super_copy; struct btrfs_trans_handle *trans; char label[BTRFS_LABEL_SIZE]; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (copy_from_user(label, arg, sizeof(label))) return -EFAULT; if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) { btrfs_err(fs_info, "unable to set label with more than %d bytes", BTRFS_LABEL_SIZE - 1); return -EINVAL; } ret = mnt_want_write_file(file); if (ret) return ret; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out_unlock; } spin_lock(&fs_info->super_lock); strcpy(super_block->label, label); spin_unlock(&fs_info->super_lock); ret = btrfs_commit_transaction(trans); out_unlock: mnt_drop_write_file(file); return ret; } #define INIT_FEATURE_FLAGS(suffix) \ { .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \ .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \ .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix } int btrfs_ioctl_get_supported_features(void __user *arg) { static const struct btrfs_ioctl_feature_flags features[3] = { INIT_FEATURE_FLAGS(SUPP), INIT_FEATURE_FLAGS(SAFE_SET), INIT_FEATURE_FLAGS(SAFE_CLEAR) }; if (copy_to_user(arg, &features, sizeof(features))) return -EFAULT; return 0; } static int btrfs_ioctl_get_features(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_super_block *super_block = fs_info->super_copy; struct btrfs_ioctl_feature_flags features; features.compat_flags = btrfs_super_compat_flags(super_block); features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block); features.incompat_flags = btrfs_super_incompat_flags(super_block); if (copy_to_user(arg, &features, 
sizeof(features))) return -EFAULT; return 0; } static int check_feature_bits(struct btrfs_fs_info *fs_info, enum btrfs_feature_set set, u64 change_mask, u64 flags, u64 supported_flags, u64 safe_set, u64 safe_clear) { const char *type = btrfs_feature_set_names[set]; char *names; u64 disallowed, unsupported; u64 set_mask = flags & change_mask; u64 clear_mask = ~flags & change_mask; unsupported = set_mask & ~supported_flags; if (unsupported) { names = btrfs_printable_features(set, unsupported); if (names) { btrfs_warn(fs_info, "this kernel does not support the %s feature bit%s", names, strchr(names, ',') ? "s" : ""); kfree(names); } else btrfs_warn(fs_info, "this kernel does not support %s bits 0x%llx", type, unsupported); return -EOPNOTSUPP; } disallowed = set_mask & ~safe_set; if (disallowed) { names = btrfs_printable_features(set, disallowed); if (names) { btrfs_warn(fs_info, "can't set the %s feature bit%s while mounted", names, strchr(names, ',') ? "s" : ""); kfree(names); } else btrfs_warn(fs_info, "can't set %s bits 0x%llx while mounted", type, disallowed); return -EPERM; } disallowed = clear_mask & ~safe_clear; if (disallowed) { names = btrfs_printable_features(set, disallowed); if (names) { btrfs_warn(fs_info, "can't clear the %s feature bit%s while mounted", names, strchr(names, ',') ? 
"s" : ""); kfree(names); } else btrfs_warn(fs_info, "can't clear %s bits 0x%llx while mounted", type, disallowed); return -EPERM; } return 0; } #define check_feature(fs_info, change_mask, flags, mask_base) \ check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags, \ BTRFS_FEATURE_ ## mask_base ## _SUPP, \ BTRFS_FEATURE_ ## mask_base ## _SAFE_SET, \ BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR) static int btrfs_ioctl_set_features(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_super_block *super_block = fs_info->super_copy; struct btrfs_ioctl_feature_flags flags[2]; struct btrfs_trans_handle *trans; u64 newflags; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (copy_from_user(flags, arg, sizeof(flags))) return -EFAULT; /* Nothing to do */ if (!flags[0].compat_flags && !flags[0].compat_ro_flags && !flags[0].incompat_flags) return 0; ret = check_feature(fs_info, flags[0].compat_flags, flags[1].compat_flags, COMPAT); if (ret) return ret; ret = check_feature(fs_info, flags[0].compat_ro_flags, flags[1].compat_ro_flags, COMPAT_RO); if (ret) return ret; ret = check_feature(fs_info, flags[0].incompat_flags, flags[1].incompat_flags, INCOMPAT); if (ret) return ret; ret = mnt_want_write_file(file); if (ret) return ret; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out_drop_write; } spin_lock(&fs_info->super_lock); newflags = btrfs_super_compat_flags(super_block); newflags |= flags[0].compat_flags & flags[1].compat_flags; newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags); btrfs_set_super_compat_flags(super_block, newflags); newflags = btrfs_super_compat_ro_flags(super_block); newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags; newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags); btrfs_set_super_compat_ro_flags(super_block, newflags); 
newflags = btrfs_super_incompat_flags(super_block); newflags |= flags[0].incompat_flags & flags[1].incompat_flags; newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags); btrfs_set_super_incompat_flags(super_block, newflags); spin_unlock(&fs_info->super_lock); ret = btrfs_commit_transaction(trans); out_drop_write: mnt_drop_write_file(file); return ret; } long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_root *root = BTRFS_I(inode)->root; void __user *argp = (void __user *)arg; switch (cmd) { case FS_IOC_GETFLAGS: return btrfs_ioctl_getflags(file, argp); case FS_IOC_SETFLAGS: return btrfs_ioctl_setflags(file, argp); case FS_IOC_GETVERSION: return btrfs_ioctl_getversion(file, argp); case FITRIM: return btrfs_ioctl_fitrim(file, argp); case BTRFS_IOC_SNAP_CREATE: return btrfs_ioctl_snap_create(file, argp, 0); case BTRFS_IOC_SNAP_CREATE_V2: return btrfs_ioctl_snap_create_v2(file, argp, 0); case BTRFS_IOC_SUBVOL_CREATE: return btrfs_ioctl_snap_create(file, argp, 1); case BTRFS_IOC_SUBVOL_CREATE_V2: return btrfs_ioctl_snap_create_v2(file, argp, 1); case BTRFS_IOC_SNAP_DESTROY: return btrfs_ioctl_snap_destroy(file, argp); case BTRFS_IOC_SUBVOL_GETFLAGS: return btrfs_ioctl_subvol_getflags(file, argp); case BTRFS_IOC_SUBVOL_SETFLAGS: return btrfs_ioctl_subvol_setflags(file, argp); case BTRFS_IOC_DEFAULT_SUBVOL: return btrfs_ioctl_default_subvol(file, argp); case BTRFS_IOC_DEFRAG: return btrfs_ioctl_defrag(file, NULL); case BTRFS_IOC_DEFRAG_RANGE: return btrfs_ioctl_defrag(file, argp); case BTRFS_IOC_RESIZE: return btrfs_ioctl_resize(file, argp); case BTRFS_IOC_ADD_DEV: return btrfs_ioctl_add_dev(fs_info, argp); case BTRFS_IOC_RM_DEV: return btrfs_ioctl_rm_dev(file, argp); case BTRFS_IOC_RM_DEV_V2: return btrfs_ioctl_rm_dev_v2(file, argp); case BTRFS_IOC_FS_INFO: return btrfs_ioctl_fs_info(fs_info, argp); case BTRFS_IOC_DEV_INFO: 
return btrfs_ioctl_dev_info(fs_info, argp); case BTRFS_IOC_BALANCE: return btrfs_ioctl_balance(file, NULL); case BTRFS_IOC_TRANS_START: return btrfs_ioctl_trans_start(file); case BTRFS_IOC_TRANS_END: return btrfs_ioctl_trans_end(file); case BTRFS_IOC_TREE_SEARCH: return btrfs_ioctl_tree_search(file, argp); case BTRFS_IOC_TREE_SEARCH_V2: return btrfs_ioctl_tree_search_v2(file, argp); case BTRFS_IOC_INO_LOOKUP: return btrfs_ioctl_ino_lookup(file, argp); case BTRFS_IOC_INO_PATHS: return btrfs_ioctl_ino_to_path(root, argp); case BTRFS_IOC_LOGICAL_INO: return btrfs_ioctl_logical_to_ino(fs_info, argp); case BTRFS_IOC_SPACE_INFO: return btrfs_ioctl_space_info(fs_info, argp); case BTRFS_IOC_SYNC: { int ret; ret = btrfs_start_delalloc_roots(fs_info, 0, -1); if (ret) return ret; ret = btrfs_sync_fs(inode->i_sb, 1); /* * The transaction thread may want to do more work, * namely it pokes the cleaner kthread that will start * processing uncleaned subvols. */ wake_up_process(fs_info->transaction_kthread); return ret; } case BTRFS_IOC_START_SYNC: return btrfs_ioctl_start_sync(root, argp); case BTRFS_IOC_WAIT_SYNC: return btrfs_ioctl_wait_sync(fs_info, argp); case BTRFS_IOC_SCRUB: return btrfs_ioctl_scrub(file, argp); case BTRFS_IOC_SCRUB_CANCEL: return btrfs_ioctl_scrub_cancel(fs_info); case BTRFS_IOC_SCRUB_PROGRESS: return btrfs_ioctl_scrub_progress(fs_info, argp); case BTRFS_IOC_BALANCE_V2: return btrfs_ioctl_balance(file, argp); case BTRFS_IOC_BALANCE_CTL: return btrfs_ioctl_balance_ctl(fs_info, arg); case BTRFS_IOC_BALANCE_PROGRESS: return btrfs_ioctl_balance_progress(fs_info, argp); case BTRFS_IOC_SET_RECEIVED_SUBVOL: return btrfs_ioctl_set_received_subvol(file, argp); #ifdef CONFIG_64BIT case BTRFS_IOC_SET_RECEIVED_SUBVOL_32: return btrfs_ioctl_set_received_subvol_32(file, argp); #endif case BTRFS_IOC_SEND: return btrfs_ioctl_send(file, argp); case BTRFS_IOC_GET_DEV_STATS: return btrfs_ioctl_get_dev_stats(fs_info, argp); case BTRFS_IOC_QUOTA_CTL: return 
btrfs_ioctl_quota_ctl(file, argp); case BTRFS_IOC_QGROUP_ASSIGN: return btrfs_ioctl_qgroup_assign(file, argp); case BTRFS_IOC_QGROUP_CREATE: return btrfs_ioctl_qgroup_create(file, argp); case BTRFS_IOC_QGROUP_LIMIT: return btrfs_ioctl_qgroup_limit(file, argp); case BTRFS_IOC_QUOTA_RESCAN: return btrfs_ioctl_quota_rescan(file, argp); case BTRFS_IOC_QUOTA_RESCAN_STATUS: return btrfs_ioctl_quota_rescan_status(file, argp); case BTRFS_IOC_QUOTA_RESCAN_WAIT: return btrfs_ioctl_quota_rescan_wait(file, argp); case BTRFS_IOC_DEV_REPLACE: return btrfs_ioctl_dev_replace(fs_info, argp); case BTRFS_IOC_GET_FSLABEL: return btrfs_ioctl_get_fslabel(file, argp); case BTRFS_IOC_SET_FSLABEL: return btrfs_ioctl_set_fslabel(file, argp); case BTRFS_IOC_GET_SUPPORTED_FEATURES: return btrfs_ioctl_get_supported_features(argp); case BTRFS_IOC_GET_FEATURES: return btrfs_ioctl_get_features(file, argp); case BTRFS_IOC_SET_FEATURES: return btrfs_ioctl_set_features(file, argp); } return -ENOTTY; } #ifdef CONFIG_COMPAT long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { /* * These all access 32-bit values anyway so no further * handling is necessary. */ switch (cmd) { case FS_IOC32_GETFLAGS: cmd = FS_IOC_GETFLAGS; break; case FS_IOC32_SETFLAGS: cmd = FS_IOC_SETFLAGS; break; case FS_IOC32_GETVERSION: cmd = FS_IOC_GETVERSION; break; } return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); } #endif
paulluo/linux
fs/btrfs/ioctl.c
C
gpl-2.0
140,009
// ********************************************************************** // // Copyright (c) 2003-2015 ZeroC, Inc. All rights reserved. // // This copy of Ice is licensed to you under the terms described in the // ICE_LICENSE file included in this distribution. // // ********************************************************************** #include <Ice/Protocol.h> #include <Ice/LocalException.h> namespace IceInternal { const Ice::Byte magic[] = { 0x49, 0x63, 0x65, 0x50 }; // 'I', 'c', 'e', 'P' const Ice::Byte requestHdr[] = { magic[0], magic[1], magic[2], magic[3], protocolMajor, protocolMinor, protocolEncodingMajor, protocolEncodingMinor, requestMsg, 0, // Compression status 0, 0, 0, 0, // Message size (placeholder) 0, 0, 0, 0 // Request id (placeholder) }; const Ice::Byte requestBatchHdr[] = { magic[0], magic[1], magic[2], magic[3], protocolMajor, protocolMinor, protocolEncodingMajor, protocolEncodingMinor, requestBatchMsg, 0, // Compression status 0, 0, 0, 0, // Message size (place holder) 0, 0, 0, 0 // Number of requests in batch (placeholder) }; const Ice::Byte replyHdr[] = { magic[0], magic[1], magic[2], magic[3], protocolMajor, protocolMinor, protocolEncodingMajor, protocolEncodingMinor, replyMsg, 0, // Compression status 0, 0, 0, 0 // Message size (placeholder) }; void stringToMajorMinor(const std::string& str, Ice::Byte& major, Ice::Byte& minor) { std::string::size_type pos = str.find_first_of("."); if(pos == std::string::npos) { Ice::VersionParseException ex(__FILE__, __LINE__); ex.str = "malformed version value `" + str + "'"; throw ex; } std::istringstream majStr(str.substr(0, pos)); Ice::Int majVersion; if(!(majStr >> majVersion) || !majStr.eof()) { Ice::VersionParseException ex(__FILE__, __LINE__); ex.str = "invalid major version value `" + str + "'"; throw ex; } std::istringstream minStr(str.substr(pos + 1, std::string::npos)); Ice::Int minVersion; if(!(minStr >> minVersion) || !minStr.eof()) { Ice::VersionParseException ex(__FILE__, __LINE__); ex.str 
= "invalid minor version value `" + str + "'"; throw ex; } if(majVersion < 1 || majVersion > 255 || minVersion < 0 || minVersion > 255) { Ice::VersionParseException ex(__FILE__, __LINE__); ex.str = "range error in version `" + str + "'"; throw ex; } major = static_cast<Ice::Byte>(majVersion); minor = static_cast<Ice::Byte>(minVersion); } void throwUnsupportedProtocolException(const char* f, int l, const Ice::ProtocolVersion& v, const Ice::ProtocolVersion& s) { throw Ice::UnsupportedProtocolException(f, l, "", v, s); } void throwUnsupportedEncodingException(const char* f, int l, const Ice::EncodingVersion& v, const Ice::EncodingVersion& s) { throw Ice::UnsupportedEncodingException(f, l, "", v, s); } } namespace Ice { const EncodingVersion currentEncoding = { IceInternal::encodingMajor, IceInternal::encodingMinor }; const ProtocolVersion currentProtocol = { IceInternal::protocolMajor, IceInternal::protocolMinor }; // // The encoding to use for protocol messages, this version is tied to // the protocol version. // const EncodingVersion currentProtocolEncoding = { IceInternal::protocolEncodingMajor, IceInternal::protocolEncodingMinor }; const ProtocolVersion Protocol_1_0 = { 1, 0 }; const EncodingVersion Encoding_1_0 = { 1, 0 }; const EncodingVersion Encoding_1_1 = { 1, 1 }; }
elijah513/ice
cpp/src/Ice/Protocol.cpp
C++
gpl-2.0
3,687
/* * Copyright (c) 2014 Free Software Foundation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef _GRUB_GLUE_H #define _GRUB_GLUE_H #define GRUB_FILE __FILE__ #define grub_memcmp memcmp #define grub_printf printf #define grub_puts_ puts #include <mach/mach_types.h> #include <i386/vm_param.h> /* Warning: this leaks memory maps for now, do not use it yet for something * else than Mach shutdown. */ vm_offset_t io_map_cached(vm_offset_t phys_addr, vm_size_t size); #endif /* _GRUB_GLUE_H */
sebastianscatularo/gnumach
i386/grub/glue.h
C
gpl-2.0
1,111
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>Untitled Page</title> <script type="text/javascript"> function InsertHtml() { // Get the current selection. var oSel = window.getSelection() ; // Get the first available range. var oRange = oSel.getRangeAt(0) ; // Create a fragment with the input HTML. var oFragment = oRange.createContextualFragment( '- This is a <b>test</b> -' ) ; oRange.insertNode(oFragment) ; } </script> </head> <body> <p> This is some text. Select some text and click the following button to insert HTML on it. </p> <p> <input type="button" value="Insert HTML" onclick="InsertHtml();" /> </p> </body> </html>
SuriyaaKudoIsc/wikia-app-test
extensions/FCKeditor/fckeditor/_dev/browserbugs/safari/createContextualFragment.html
HTML
gpl-2.0
815
<?php /** * File containing the eZNotificationEventHandler class. * * @copyright Copyright (C) eZ Systems AS. All rights reserved. * @license For full copyright and license information view LICENSE file distributed with this source code. * @version 2014.07.0 * @package kernel */ /*! \class eZNotificationEventHandler eznotificationeventhandler.php \brief The class eZNotificationEventHandler does */ class eZNotificationEventHandler { const EVENT_HANDLED = 0; const EVENT_SKIPPED = 1; const EVENT_UNKNOWN = 2; const EVENT_ERROR = 3; /*! Constructor */ function eZNotificationEventHandler( $idString, $name ) { $this->IDString = $idString; $this->Name = $name; } function attributes() { return array( 'id_string', 'name' ); } function hasAttribute( $attr ) { return in_array( $attr, $this->attributes() ); } function attribute( $attr ) { if ( $attr == 'id_string' ) { return $this->IDString; } else if ( $attr == 'name' ) { return $this->Name; } eZDebug::writeError( "Attribute '$attr' does not exist", __METHOD__ ); return null; } function handle( $event ) { return true; } /*! Cleanup any specific tables or other resources. */ function cleanup() { } function fetchHttpInput( $http, $module ) { return true; } function storeSettings( $http, $module ) { return true; } public $IDString = false; public $Name = false; } ?>
tonvinh/ez
ezpublish_legacy/kernel/classes/notification/eznotificationeventhandler.php
PHP
gpl-2.0
1,660
/* **************************************************************** * Portions Copyright 1998, 2009-2012 VMware, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * ****************************************************************/ /****************************************************************** * * linux_scsi_transport.c * * Linux scsi transport emulation. Covers transport functionality of * SCSI drivers - pSCSI, FC(with NPIV) and SAS * * From linux-2.6.18-8/drivers/scsi/scsi_transport_fc.c: * * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. * Copyright (C) 2004-2005 James Smart, Emulex Corporation * * From linux-2.6.18-8/drivers/scsi/scsi_transport_sas.c: * * Copyright (C) 2005-2006 Dell Inc. * * From linux-2.6.18-8/drivers/scsi/scsi_transport_spi.c: * * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. 
* Copyright (c) 2004, 2005 James Bottomley <James.Bottomley@SteelEye.com> * * From linux-2.6.18-8/drivers/scsi/scsi.c: * * Copyright (C) 1992 Drew Eckhardt * Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale * Copyright (C) 2002, 2003 Christoph Hellwig * ******************************************************************/ #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_spi.h> #include <scsi/scsi_transport_fc.h> #include <scsi/scsi_transport_sas.h> #include <vmklinux_92/vmklinux_scsi.h> #include <linux/pci.h> #include <linux/delay.h> #include "vmkapi.h" #include "linux_scsi.h" /* To be used for SCSI emulation */ #include "linux_scsi_transport.h" /* To be used for SCSI transport */ #include "linux_stubs.h" #define VMKLNX_LOG_HANDLE LinScsiTransport #include "vmklinux_log.h" /* * The PPR values at which you calculate the period in ns by multiplying * by 4 */ #define SPI_STATIC_PPR 0x0c static const int ppr_to_ps[] = { /* The PPR values 0-6 are reserved, fill them in when * the committee defines them */ -1, /* 0x00 */ -1, /* 0x01 */ -1, /* 0x02 */ -1, /* 0x03 */ -1, /* 0x04 */ -1, /* 0x05 */ -1, /* 0x06 */ 3125, /* 0x07 */ 6250, /* 0x08 */ 12500, /* 0x09 */ 25000, /* 0x0a */ 30300, /* 0x0b */ 50000, /* 0x0c */ }; static atomic_t fc_event_seq; /* * dev_loss_tmo: the default number of seconds that the FC transport * should insulate the loss of a remote port. * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT. */ static unsigned int fc_dev_loss_tmo = 10; /* seconds */ /* * remove_on_dev_loss: controls whether the transport will * remove a scsi target after the device loss timer expires. 
* Removal on disconnect is modeled after the USB subsystem * and expects subsystems layered on SCSI to be aware of * potential device loss and handle it appropriately. However, * many subsystems do not support device removal, leaving situations * where structure references may remain, causing new device * name assignments, etc., if the target returns. */ static unsigned int fc_remove_on_dev_loss = 0; static void spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer); static enum spi_compare_returns spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer, u8 *ptr, const int retries); static enum spi_compare_returns spi_dv_retrain(struct scsi_device *sdev, u8 *buffer, u8 *ptr, enum spi_compare_returns (*compare_fn)(struct scsi_device *, u8 *, u8 *, int)); static int spi_execute(struct scsi_device *sdev, const void *cmd, enum dma_data_direction dir, void *buffer, unsigned bufflen, struct scsi_sense_hdr *sshdr); static int fc_queue_work(struct Scsi_Host *shost, struct work_struct *work); static void fc_timeout_deleted_rport(struct work_struct *work); static void fc_timeout_fail_rport_io(struct work_struct *work); static void fc_scsi_scan_rport(struct work_struct *work); static void fc_starget_delete(struct work_struct *work); static void fc_rport_final_delete(struct work_struct *work); static void fc_flush_work(struct Scsi_Host *shost); static void fc_flush_devloss(struct Scsi_Host *shost); static int fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *dwork, unsigned long delay); static void fc_rport_dev_release(struct device *dev); static void fc_vport_dev_release(struct device *dev); static void fc_vport_sched_delete(struct work_struct *work); static int vmk_fc_vport_terminate(struct fc_vport *vport); static void sas_phy_release(struct device *dev); static void sas_port_release(struct device *dev); static void sas_end_device_release(struct device *dev); static void sas_expander_release(struct device *dev); static int 
sas_assign_scsi_target_id(struct sas_rphy *rphy, struct sas_host_attrs *sas_host); static int sas_scsi_target_reparent(struct device *dev, void *data); static int fc_host_setup(struct Scsi_Host *shost); static void fc_host_free(struct Scsi_Host *shost); static int sas_remove_scsi_target(struct device *dev, void *data); /* * vmklnx_alloc_scsimod * * alloc and init a vmklnx_ScsiModule * @type: transport type * @data: transport data * * RETURN VALUE: * retval initialized vmklnx_ScsiModule * retval NULL out of memory * */ static struct vmklnx_ScsiModule * vmklnx_alloc_scsimod(vmklnx_ScsiTransportType type, void *data) { struct vmklnx_ScsiModule *vmklnx26ScsiModule; vmklnx26ScsiModule = kzalloc(sizeof(struct vmklnx_ScsiModule), GFP_KERNEL); VMK_ASSERT(vmklnx26ScsiModule); if (!vmklnx26ScsiModule) { VMKLNX_WARN("Unable to allocate memory for vmklnx_ScsiModule"); return NULL; } vmklnx26ScsiModule->moduleID = vmk_ModuleStackTop(); vmklnx26ScsiModule->transportType = type; vmklnx26ScsiModule->transportData = data; return vmklnx26ScsiModule; } /** * vmklnx_generic_san_attach_transport - Attach generic transport ScsiModule * @ft: Pointer to the xsan_function_template * @target_size: size of target(transport) attributes * @host_size: size of host attributes * * Allocate and initialize all the vmklinux data structures and attach the * passed in pointer to the xsan_function_template. With generic SAN transport * type, it allows unique adapter/target ID, RDMs and periodic rescanning for * new LUNs. Caller should free allocated generic transport when finished * with vmklnx_generic_san_release_transport. 
* * RETURN VALUE: * - initialized scsi_transport_template if successful * - returns NULL if out of memory * */ /* _VMKLNX_CODECHECK_: vmklnx_generic_san_attach_transport */ struct scsi_transport_template * vmklnx_generic_san_attach_transport( struct xsan_function_template *ft, size_t target_size, size_t host_size ) { struct vmklnx_ScsiModule *vmklnx26ScsiModule; struct xsan_internal *i = kzalloc(sizeof(struct xsan_internal), GFP_KERNEL); VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); if (unlikely(!i)) { VMKLNX_WARN("Unable to allocate memory for xsan_internal"); return NULL; } vmklnx26ScsiModule = vmklnx_alloc_scsimod(VMKLNX_SCSI_TRANSPORT_TYPE_XSAN, i); if (!vmklnx26ScsiModule) { kfree(i); return NULL; } i->t.module = (void *)vmklnx26ScsiModule; i->t.target_size = target_size; i->t.host_size = host_size; i->f = ft; return &i->t; } EXPORT_SYMBOL(vmklnx_generic_san_attach_transport); /** * Releases a generic SAN transport previously registered with * vmklnx_generic_san_attach_transport. Caller must have previously * allocated generic transport with vmklnx_generic_san_attach_transport. * @t: transport template * * RETURN VALUE: * None. */ /* _VMKLNX_CODECHECK_: vmklnx_generic_san_release_transport */ void vmklnx_generic_san_release_transport(struct scsi_transport_template *t) { struct xsan_internal *i = to_xsan_internal(t); VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); VMK_ASSERT(t->module); VMK_ASSERT(((struct vmklnx_ScsiModule *)(t->module))->transportData == i); kfree(t->module); kfree(i); } EXPORT_SYMBOL(vmklnx_generic_san_release_transport); /* * Initialize generic SAN host attributes * * Results: * None. * * Side effects: * None. 
*/ int vmklnx_xsan_host_setup(struct Scsi_Host *shost) { struct xsan_internal *i; int ret = 0; VMK_ASSERT(shost); /* * Create Mgmt Adapter Instance to our management */ if (XsanLinuxAttachMgmtAdapter(shost)) { return -ENOMEM; } VMK_ASSERT(shost->transportt); i = to_xsan_internal(shost->transportt); if (i->f->setup_host_attributes) { VMKAPI_MODULE_CALL(SCSI_GET_MODULE_ID(shost), ret, i->f->setup_host_attributes, shost); } return ret; } /* * Initialize generic SAN transport attributes * * Results: * None. * * Side effects: * None. */ int xsan_setup_transport_attrs( struct Scsi_Host *shost, struct scsi_target *starget) { struct xsan_internal *i = to_xsan_internal(shost->transportt); int ret = 0; if (i->f->setup_transport_attributes) { VMKAPI_MODULE_CALL(SCSI_GET_MODULE_ID(shost), ret, i->f->setup_transport_attributes, shost, starget); } return ret; } /** * Attach pSCSI transport * Allocate and initialize all the vmklinux data structures and attach the * passed in pointer to the spi_function_template. * @ft: Pointer to the spi_function_template * * RETURN VALUE: * non-NULL is a success and is a pointer to the new template * NULL is a failure. 
* * ESX Deviation Notes: * This function also does the necessary initialization of data structures for * the vmklinux storage stack */ /* _VMKLNX_CODECHECK_: spi_attach_transport */ struct scsi_transport_template * spi_attach_transport(struct spi_function_template *ft) { struct vmklnx_ScsiModule *vmklnx26ScsiModule; struct spi_internal *i = kzalloc(sizeof(struct spi_internal), GFP_KERNEL); VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); if (unlikely(!i)) { VMKLNX_WARN("Unable to allocate memory for spi_internal"); return NULL; } vmklnx26ScsiModule = vmklnx_alloc_scsimod(VMKLNX_SCSI_TRANSPORT_TYPE_PSCSI, i); if (!vmklnx26ScsiModule) { kfree(i); return NULL; } i->t.module = (void *)vmklnx26ScsiModule; i->t.target_size = sizeof(struct spi_transport_attrs); i->t.host_size = sizeof(struct spi_host_attrs); i->f = ft; return &i->t; } EXPORT_SYMBOL(spi_attach_transport); /** * Releases pSCSI transport * @t: pointer to scsi_transport_template * * RETURN VALUE: * None */ /* _VMKLNX_CODECHECK_: spi_release_transport */ void spi_release_transport(struct scsi_transport_template *t) { struct spi_internal *i = to_spi_internal(t); VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); /* * Free up the module structure */ kfree(t->module); /* * Free up the transport_internal structure */ kfree(i); } EXPORT_SYMBOL(spi_release_transport); /* * Initialize pSCSI transport attributes * * Results: * None. * * Side effects: * None. 
*/ int spi_setup_transport_attrs(struct scsi_target *starget) { spi_period(starget) = -1; /* illegal value */ spi_min_period(starget) = 0; spi_offset(starget) = 0; /* async */ spi_max_offset(starget) = 255; spi_width(starget) = 0; /* narrow */ spi_max_width(starget) = 1; spi_iu(starget) = 0; /* no IU */ spi_dt(starget) = 0; /* ST */ spi_qas(starget) = 0; spi_wr_flow(starget) = 0; spi_rd_strm(starget) = 0; spi_rti(starget) = 0; spi_pcomp_en(starget) = 0; spi_hold_mcs(starget) = 0; spi_dv_pending(starget) = 0; spi_dv_in_progress(starget) = 0; spi_initial_dv(starget) = 0; mutex_init(&spi_dv_mutex(starget)); spi_attr_initialized(starget) = 1; return 0; } /** spi_dv_device - Do Domain Validation on the device * @sdev: scsi device to validate * * Performs the domain validation on the given device in the * current execution thread. Since DV operations may sleep, * the current thread must have user context. Also no SCSI * related locks that would deadlock I/O issued by the DV may * be held. */ /* _VMKLNX_CODECHECK_: spi_dv_device */ void spi_dv_device(struct scsi_device *sdev) { struct scsi_target *stgt = sdev->sdev_target; u8 *buffer; const int len = SPI_MAX_ECHO_BUFFER_SIZE*2; VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); VMK_ASSERT(sdev); if (unlikely(spi_dv_in_progress(stgt))) { return; } if (unlikely(scsi_device_get(sdev))) { return; } spi_dv_in_progress(stgt) = 1; buffer = vmklnx_kzmalloc(vmklnxLowHeap, len, GFP_KERNEL); if (!buffer) { spi_dv_in_progress(stgt) = 0; scsi_device_put(sdev); return; } /* * Drain out all commands on target before we start domain validation * Also dont accept commands from the storage stack */ scsi_target_quiesce(stgt); spi_dv_pending(stgt) = 1; mutex_lock(&spi_dv_mutex(stgt)); VMKLNX_DEBUG(0, "Beginning Domain Validation"); spi_dv_device_internal(sdev, buffer); VMKLNX_DEBUG(0, "Ending Domain Validation"); mutex_unlock(&spi_dv_mutex(stgt)); spi_dv_pending(stgt) = 0; scsi_target_resume(stgt); spi_initial_dv(stgt) = 1; 
vmklnx_kfree(vmklnxLowHeap, buffer); spi_dv_in_progress(stgt) = 0; scsi_device_put(sdev); } EXPORT_SYMBOL(spi_dv_device); /** * scsi_device_quiesce - Block user issued commands. * @sdev: scsi device to quiesce. * @ref: not in use. * * This works by trying to transition to the SDEV_QUIESCE state * (which must be a legal transition). When the device is in this * state, only special requests will be accepted, all others will * be deferred. Unlike Linux behavior, requeues are handled * outside the vmklinux layer rather than as special requests * within the vmklinux layer. * * Returns zero if unsuccessful or an error if not. **/ void vmklnx_scsi_device_quiesce(struct scsi_device *sdev, void *ref) { sdev->sdev_state = SDEV_QUIESCE; /* * Give some time before all commands are flushed out */ while (sdev->device_busy) { /* * There is no way to know if all the commands are flushed out * This is an arbitary number that seems to work with mptspi * and aic79xx driver. Use caution when modifying this value */ if(!in_interrupt()) { msleep_interruptible(1); } else { mdelay(1); } } } /** * scsi_device_resume - Restart user issued commands to a quiesced device. * @sdev: scsi device to resume. * @ref: not in use. * * Moves the device from quiesced back to running and restarts the * queues. * * Must be called with user context, may sleep. **/ void vmklnx_scsi_device_resume(struct scsi_device *sdev, void *ref) { sdev->sdev_state = SDEV_RUNNING; } void scsi_target_quiesce(struct scsi_target *starget) { VMK_ASSERT(starget); starget_for_each_device(starget, NULL, vmklnx_scsi_device_quiesce); } void scsi_target_resume(struct scsi_target *starget) { VMK_ASSERT(starget); starget_for_each_device(starget, NULL, vmklnx_scsi_device_resume); } /** * helper to walk all devices of a target. * Using host_lock instead of reference counting * This traverses over each devices of @shost. The devices have * a reference that must be released by scsi_host_put when breaking * out of the loop. 
host_lock can not be held on this * @stgt: target whose devices we want to iterate over. * @data: Opaque passed to each function call. * @fn: Function to call on each device. */ /* _VMKLNX_CODECHECK_: starget_for_each_device */ void starget_for_each_device(struct scsi_target *stgt, void * data, void (*fn)(struct scsi_device *, void *)) { struct Scsi_Host *sh; struct scsi_device *sdev; VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); VMK_ASSERT(stgt); sh = dev_to_shost(stgt->dev.parent); VMK_ASSERT(sh); shost_for_each_device(sdev, sh) { if ((sdev->channel == stgt->channel) && (sdev->id == stgt->id)) fn(sdev, data); } } EXPORT_SYMBOL(starget_for_each_device); /* * spi_dv_device_internal -- * * Process DV for pSCSI * * Results: * None. * * Side effects: * None. */ static void spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) { struct spi_internal *i = to_spi_internal(sdev->host->transportt); struct scsi_target *starget = sdev->sdev_target; struct Scsi_Host *shost = sdev->host; int len = sdev->inquiry_len; VMK_ASSERT(sdev); VMK_ASSERT(buffer); /* * first set us up for narrow async */ DV_SET(offset, 0); DV_SET(width, 0); if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS) != SPI_COMPARE_SUCCESS) { VMKLNX_DEBUG(0, "Domain Validation Initial Inquiry" " Failed for adapter %s, channel %d, id %d", sdev->host->hostt->name, sdev->channel, sdev->id); return; } /* test width */ if (i->f->set_width && spi_max_width(starget) && scsi_device_wide(sdev)) { VMKAPI_MODULE_CALL_VOID(SCSI_GET_MODULE_ID(shost), i->f->set_width, starget, 1); if (spi_dv_device_compare_inquiry(sdev, buffer, buffer + len, DV_LOOPS) != SPI_COMPARE_SUCCESS) { VMKLNX_DEBUG(0, "Wide transfers failed for " "adapter %s, channel %d, id %d", sdev->host->hostt->name, sdev->channel, sdev->id); VMKAPI_MODULE_CALL_VOID(SCSI_GET_MODULE_ID(shost), i->f->set_width, starget, 0); } } if (!i->f->set_period) { return; } /* device can't handle synchronous */ if (!scsi_device_sync(sdev) && 
!scsi_device_dt(sdev)) { return; } /* * len == -1 is the signal that we need to ascertain the * presence of an echo buffer before trying to use it. len == * 0 means we don't have an echo buffer */ len = -1; /* now set up to the maximum */ DV_SET(offset, spi_max_offset(starget)); DV_SET(period, spi_min_period(starget)); /* try QAS requests; this should be harmless to set if the * target supports it */ if (scsi_device_qas(sdev)) { DV_SET(qas, 1); } else { DV_SET(qas, 0); } if (scsi_device_ius(sdev) && spi_min_period(starget) < 9) { /* This u320 (or u640). Set IU transfers */ DV_SET(iu, 1); /* Then set the optional parameters */ DV_SET(rd_strm, 1); DV_SET(wr_flow, 1); DV_SET(rti, 1); if (spi_min_period(starget) == 8) DV_SET(pcomp_en, 1); } else { DV_SET(iu, 0); } /* * now that we've done all this, actually check the bus * signal type (if known). Some devices are stupid on * a SE bus and still claim they can try LVD only settings */ if (i->f->get_signalling) { VMKAPI_MODULE_CALL_VOID(SCSI_GET_MODULE_ID(shost), i->f->get_signalling, shost); } if (spi_signalling(shost) == SPI_SIGNAL_SE || spi_signalling(shost) == SPI_SIGNAL_HVD || !scsi_device_dt(sdev)) { DV_SET(dt, 0); } else { DV_SET(dt, 1); } /* Do the read only INQUIRY tests */ spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len, spi_dv_device_compare_inquiry); /* See if we actually managed to negotiate and sustain DT */ if (i->f->get_dt) { VMKAPI_MODULE_CALL_VOID(SCSI_GET_MODULE_ID(shost), i->f->get_dt, starget); } /* * Linux does additional tests, like reading and writing tests * But these tests dont set any parameters in the driver. So for now * behaving as though the device has no echo buffer in place */ return; } /* * spi_dv_retrain -- * * Perform various DV * * Results: * None. * * Side effects: * None. 
* */ static enum spi_compare_returns spi_dv_retrain(struct scsi_device *sdev, u8 *buffer, u8 *ptr, enum spi_compare_returns (*compare_fn)(struct scsi_device *, u8 *, u8 *, int)) { struct spi_internal *i = to_spi_internal(sdev->host->transportt); struct scsi_target *starget = sdev->sdev_target; int period = 0, prevperiod = 0; enum spi_compare_returns retval; struct Scsi_Host *shost = sdev->host; for (;;) { int newperiod; retval = compare_fn(sdev, buffer, ptr, DV_LOOPS); if (retval == SPI_COMPARE_SUCCESS || retval == SPI_COMPARE_SKIP_TEST) { break; } /* OK, retrain, fallback */ if (i->f->get_iu) { VMKAPI_MODULE_CALL_VOID(SCSI_GET_MODULE_ID(shost), i->f->get_iu, starget); } if (i->f->get_qas) { VMKAPI_MODULE_CALL_VOID(SCSI_GET_MODULE_ID(shost), i->f->get_qas, starget); } if (i->f->get_period) { VMKAPI_MODULE_CALL_VOID(SCSI_GET_MODULE_ID(shost), i->f->get_period, starget); } /* * Here's the fallback sequence; first try turning off * IU, then QAS (if we can control them), then finally * fall down the periods */ if (i->f->set_iu && spi_iu(starget)) { VMKLNX_DEBUG(0, "Domain Validation disabling" " information %s, channel %d, id %d", sdev->host->hostt->name, sdev->channel, sdev->id); DV_SET(iu, 0); } else if (i->f->set_qas && spi_qas(starget)) { VMKLNX_DEBUG(0, "Domain Validation disabling" " quick arbitration selection %s, channel %d, id %d", sdev->host->hostt->name, sdev->channel, sdev->id); DV_SET(qas, 0); } else { newperiod = spi_period(starget); period = newperiod > period ? 
newperiod : period; if (period < 0x0d) { period++; } else { period += period >> 1; } if (unlikely(period > 0xff || period == prevperiod)) { /* Total failure; set to async and return */ VMKLNX_DEBUG(0, "Domain Validation failure, " "dropping back to async %s, channel %d, id %d", sdev->host->hostt->name, sdev->channel, sdev->id); DV_SET(offset, 0); return SPI_COMPARE_FAILURE; } VMKLNX_DEBUG(0, "Domain Validation failure," " dropping back for %s, channel %d, id %d", sdev->host->hostt->name, sdev->channel, sdev->id); DV_SET(period, period); prevperiod = period; } } return retval; } /* * This is for the simplest form of Domain Validation: a read test * on the inquiry data from the device * * Results: * None. * * Side effects: * None. * */ static enum spi_compare_returns spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer, u8 *ptr, const int retries) { int r, result, readlen = VMK_SECTOR_SIZE; /* Required for mptscsi */ const int len = sdev->inquiry_len; const char spi_inquiry[] = { INQUIRY, 0, 0, 0, len, 0 }; VMK_ASSERT(sdev); VMK_ASSERT(retries != 0); if (readlen < len) { readlen = len; } for (r = 0; r < retries; r++) { memset(ptr, 0, readlen); result = spi_execute(sdev, spi_inquiry, DMA_FROM_DEVICE, ptr, readlen, NULL); if(result || !scsi_device_online(sdev)) { return SPI_COMPARE_FAILURE; } /* If we don't have the inquiry data already, the * first read gets it */ if (ptr == buffer) { ptr += readlen; --r; continue; } if (memcmp(buffer, ptr, len) != 0) { /* failure */ return SPI_COMPARE_FAILURE; } } return SPI_COMPARE_SUCCESS; } /* * Send down Commands for DV */ static int spi_execute(struct scsi_device *sdev, const void *cmd, enum dma_data_direction dir, void *buffer, unsigned bufflen, struct scsi_sense_hdr *sshdr) { int i, result; unsigned char sense[SCSI_SENSE_BUFFERSIZE]; VMK_ASSERT(sdev); for(i = 0; i < DV_RETRIES; i++) { result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense, DV_TIMEOUT, /* retries */ 1, REQ_FAILFAST); if (result & 
DRIVER_SENSE) { struct scsi_sense_hdr sshdr_tmp; if (!sshdr) { sshdr = &sshdr_tmp; } if (scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr) && sshdr->sense_key == UNIT_ATTENTION) { continue; } } break; } return result; } /* * Print the fraction details * * Results: * None. * * Side effects: * None. */ static int sprint_frac(char *dest, int value, int denom) { int frac = value % denom; int result = sprintf(dest, "%d", value / denom); if (frac == 0) { return result; } dest[result++] = '.'; do { denom /= 10; sprintf(dest + result, "%d", frac / denom); result++; frac %= denom; } while (frac); dest[result++] = '\0'; return result; } /** * Prints transfer details. * Each SPI port is required to maintain a transfer agreement for each * other port on the bus. This function prints a one-line summary of * the current agreement; * @starget: pointer to scsi_target * * RETURN VALUE: * None */ /* _VMKLNX_CODECHECK_: spi_display_xfer_agreement */ void spi_display_xfer_agreement(struct scsi_target *starget) { struct spi_transport_attrs *tp; VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); VMK_ASSERT(starget); tp = (struct spi_transport_attrs *)&starget->starget_data; VMK_ASSERT(tp); if (tp->offset > 0 && tp->period > 0) { unsigned int picosec, kb100; char *scsi = "FAST-?"; char tmp[8]; if (tp->period <= SPI_STATIC_PPR) { picosec = ppr_to_ps[tp->period]; switch (tp->period) { case 7: scsi = "FAST-320"; break; case 8: scsi = "FAST-160"; break; case 9: scsi = "FAST-80"; break; case 10: case 11: scsi = "FAST-40"; break; case 12: scsi = "FAST-20"; break; } } else { picosec = tp->period * 4000; if (tp->period < 25) { scsi = "FAST-20"; } else if (tp->period < 50) { scsi = "FAST-10"; } else scsi = "FAST-5"; } kb100 = (10000000 + picosec / 2) / picosec; if (tp->width) { kb100 *= 2; } sprint_frac(tmp, picosec, 1000); VMKLNX_DEBUG(0, "%s %sSCSI %d.%d MB/s %s%s%s%s%s%s%s%s (%s ns, offset %d)", scsi, tp->width ? "WIDE " : "", kb100/10, kb100 % 10, tp->dt ? "DT" : "ST", tp->iu ? 
      " IU" : "", tp->qas ? " QAS" : "", tp->rd_strm ? " RDSTRM" : "",
      tp->rti ? " RTI" : "", tp->wr_flow ? " WRFLOW" : "",
      tp->pcomp_en ? " PCOMP" : "", tp->hold_mcs ? " HMCS" : "",
      tmp, tp->offset);
   } else {
      /* No synchronous transfer agreement in effect; report async. */
      VMKLNX_DEBUG(2, "%sasynchronous", tp->width ? "wide " : "");
   }
}
EXPORT_SYMBOL(spi_display_xfer_agreement);

/*
 * spi_populate_ppr_msg -- Populate the message fields
 *
 * Builds an extended PPR (Parallel Protocol Request) SCSI message
 * into the caller-supplied buffer, which must hold at least 8 bytes.
 *
 * @msg: Pointer to message field
 * @period: Period
 * @offset: Offset
 * @width: Width
 * @options: Options
 *
 * RETURN VALUE:
 * return 8
 *
 * Include
 * scsi/scsi_transport_spi.h
 */
int spi_populate_ppr_msg(unsigned char *msg, int period, int offset, int width,
                         int options)
{
   VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
   msg[0] = EXTENDED_MESSAGE;
   msg[1] = 6;                 /* length of the message body that follows */
   msg[2] = EXTENDED_PPR;
   msg[3] = period;
   msg[4] = 0;                 /* always zero */
   msg[5] = offset;
   msg[6] = width;
   msg[7] = options;
   return 8;                   /* number of bytes written into msg */
}
EXPORT_SYMBOL(spi_populate_ppr_msg);

/*
 * spi_populate_width_msg -- Populate the width message fields
 *
 * Builds an extended WDTR (Wide Data Transfer Request) SCSI message
 * into the caller-supplied buffer, which must hold at least 4 bytes.
 *
 * @msg: Pointer to message field
 * @width: Width
 *
 * RETURN VALUE:
 * return 4
 *
 * Include
 * scsi/scsi_transport_spi.h
 */
int spi_populate_width_msg(unsigned char *msg, int width)
{
   VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
   msg[0] = EXTENDED_MESSAGE;
   msg[1] = 2;                 /* length of the message body that follows */
   msg[2] = EXTENDED_WDTR;
   msg[3] = width;
   return 4;                   /* number of bytes written into msg */
}
EXPORT_SYMBOL(spi_populate_width_msg);

/*
 * spi_populate_sync_msg -- Populate the sync message fields
 *
 * Builds an extended SDTR (Synchronous Data Transfer Request) SCSI
 * message into the caller-supplied buffer, which must hold at least
 * 5 bytes.
 *
 * @msg: Pointer to message field
 * @period: Period
 * @offset: Offset
 *
 * RETURN VALUE:
 * return 5
 *
 * Include
 * scsi/scsi_transport_spi.h
 */
int spi_populate_sync_msg(unsigned char *msg, int period, int offset)
{
   VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
   msg[0] = EXTENDED_MESSAGE;
   msg[1] = 3;                 /* length of the message body that follows */
   msg[2] = EXTENDED_SDTR;
   msg[3] = period;
   msg[4] = offset;
   return 5;                   /* number of bytes written into msg */
}
EXPORT_SYMBOL(spi_populate_sync_msg);

/**
 * fc_attach_transport - attaches an FC transport
 * @ft: functions used to communicate with driver
 *
 * Registers an FC transport with vmklinux
 *
 * RETURN VALUES:
 * A populated scsi_transport_template upon success,
 * NULL if memory could not be allocated to register the transport
 *
 * SEE ALSO:
 * fc_release_transport
 */
/* _VMKLNX_CODECHECK_: fc_attach_transport */
struct scsi_transport_template *
fc_attach_transport(struct fc_function_template *ft)
{
   struct vmklnx_ScsiModule *vmklnx26ScsiModule;
   /* fc_internal embeds the scsi_transport_template returned to the caller */
   struct fc_internal *i = kzalloc(sizeof(struct fc_internal), GFP_KERNEL);

   VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
   VMK_ASSERT(ft);
   VMK_ASSERT(i);

   if (unlikely(!i)) {
      VMKLNX_WARN("Unable to allocate memory for fc_internal");
      return NULL;
   }

   vmklnx26ScsiModule = vmklnx_alloc_scsimod(VMKLNX_SCSI_TRANSPORT_TYPE_FC, i);
   if (!vmklnx26ScsiModule) {
      /* Module registration failed; release the template allocation too. */
      kfree(i);
      return NULL;
   }

   /*
    * FC Transport uses the shost workq for scsi scanning
    * rport->scan_work = consumer
    */
   i->t.create_work_queue = 1;

   i->t.module = (void *)vmklnx26ScsiModule;
   /* Reserve per-host and per-target space for the FC attribute blocks. */
   i->t.host_size = sizeof(struct fc_host_attrs);
   i->t.target_size = sizeof(struct fc_starget_attrs);
   i->f = ft;

   return &i->t;
}
EXPORT_SYMBOL(fc_attach_transport);

/**
 * Releases an FC transport previously registered with fc_attach_transport
 * @t: scsi_transport_template as returned by fc_attach_transport
 *
 * SEE ALSO:
 * fc_attach_transport
 *
 * RETURN VALUE:
 * This function does not return a value
 */
/* _VMKLNX_CODECHECK_: fc_release_transport */
void
fc_release_transport(struct scsi_transport_template *t)
{
   struct fc_internal *i;

   VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
   VMK_ASSERT(t);

   i = to_fc_internal(t);
   VMK_ASSERT(i);

   /*
    * Free up the module structure
    */
   kfree(t->module);

   /*
    * Free up the transport_internal structure
    * (the template itself is embedded in fc_internal, so this frees both)
    */
   kfree(i);
}
EXPORT_SYMBOL(fc_release_transport);

/*
 * fc_host_setup
 *
 * Slightly deviated version of the upstream fc_host_setup.
 * Exports information
 * required by vmk_FcAdapter
 *
 * Results:
 *    0 on Success
 *
 * Side effects:
 *    None
 */
int
fc_host_setup(struct Scsi_Host *shost)
{
   struct fc_host_attrs *fc_host;

   VMK_ASSERT(shost);
   fc_host = shost_to_fc_host(shost);
   VMK_ASSERT(fc_host);

   /*
    * Set default values easily detected by the midlayer as
    * failure cases.  The scsi lldd is responsible for initializing
    * all transport attributes to valid values per host.
    */
   fc_host->node_name = -1;
   fc_host->port_name = -1;
   fc_host->permanent_port_name = -1;
   fc_host->supported_classes = FC_COS_UNSPECIFIED;
   memset(fc_host->supported_fc4s, 0, sizeof(fc_host->supported_fc4s));
   fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
   fc_host->maxframe_size = -1;
   fc_host->max_npiv_vports = 0;
   memset(fc_host->serial_number, 0, sizeof(fc_host->serial_number));

   fc_host->port_id = -1;
   fc_host->port_type = FC_PORTTYPE_UNKNOWN;
   fc_host->port_state = FC_PORTSTATE_UNKNOWN;
   memset(fc_host->active_fc4s, 0, sizeof(fc_host->active_fc4s));
   fc_host->speed = FC_PORTSPEED_UNKNOWN;
   fc_host->fabric_name = -1;
   memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name));
   memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname));

   fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN;

   INIT_LIST_HEAD(&fc_host->rports);
   INIT_LIST_HEAD(&fc_host->rport_bindings);
   INIT_LIST_HEAD(&fc_host->vports);
   fc_host->next_rport_number = 0;
   fc_host->next_target_id = 0;
   fc_host->next_vport_number = 0;
   fc_host->npiv_vports_inuse = 0;

   /*
    * Create a work queue to handle FC requests
    * Refer fc_queue_work to handle below consumers
    *    rport_delete_work
    *    stgt_delete_work
    */
   snprintf(fc_host->work_q_name, FC_MAX_WORK_QUEUE_NAME,
            "fc_wq_%d", shost->host_no);
   fc_host->work_q = create_singlethread_workqueue(fc_host->work_q_name);
   if (!fc_host->work_q) {
      VMKLNX_WARN("Error: Could not create FC WQ");
      return -ENOMEM;
   }

   /*
    * Create a work queue to handle FC devloss requests
    * Refer fc_queue_devloss_work for below consumers
    *    dev_loss_work,
    *    fail_io_work
    */
   snprintf(fc_host->devloss_work_q_name, FC_MAX_WORK_QUEUE_NAME,
            "fc_dl_%d", shost->host_no);
   fc_host->devloss_work_q = create_singlethread_workqueue(
                                fc_host->devloss_work_q_name);
   if (!fc_host->devloss_work_q) {
      VMKLNX_WARN("Error: Could not create FC Devloss WQ");
      /* Unwind the first workqueue on partial failure. */
      destroy_workqueue(fc_host->work_q);
      fc_host->work_q = NULL;
      return -ENOMEM;
   }

   /*
    * This flag is used to catch cases where
    * some drivers tend to do rport operation
    * after fc_host is destroyed
    */
   atomic_set(&fc_host->vmklnx_flag, VMKLNX_FC_HOST_READY);

   return 0;
}

/*
 * vmklnx_fc_host_setup
 *
 * Sets ups FC adapters and corresponding mgmt information
 *
 * Results:
 *    0 on Success
 *
 * Side effects:
 *    None
 */
int
vmklnx_fc_host_setup(struct Scsi_Host *shost)
{
   int status;

   VMK_ASSERT(shost);

   /*
    * Create Mgmt Adapter Instance to our management
    */
   if ((status = FcLinuxAttachMgmtAdapter(shost))) {
      vmk_WarningMessage("Failed to attach FC attributes to VMKernel");
      return status;
   }

   if ((status = fc_host_setup(shost))) {
      vmk_WarningMessage("Failed to setup FC attributes");
      /* Roll back the mgmt adapter attach done above. */
      FcLinuxReleaseMgmtAdapter(shost);
      return status;
   }

   return 0;
}

/*
 * vmklnx_fcoe_host_setup
 *
 * Sets ups FCoE adapters and corresponding mgmt information
 *
 * Results:
 *    0 on Success
 *
 * Side effects:
 *    None
 */
int
vmklnx_fcoe_host_setup(struct Scsi_Host *shost)
{
   int status;

   VMK_ASSERT(shost);

   /*
    * Create Mgmt Adapter Instance to our management
    */
   if ((status = FcoeLinuxAttachMgmtAdapter(shost))) {
      vmk_WarningMessage("Failed to attach FCoE attributes to VMKernel");
      return status;
   }

   if ((status = fc_host_setup(shost))) {
      vmk_WarningMessage("Failed to setup FCoE attributes");
      /* Roll back the mgmt adapter attach done above. */
      FcoeLinuxReleaseMgmtAdapter(shost);
      return status;
   }

   return 0;
}

/*
 * fc_host_free
 *
 * Free's up resources allocated to the FC host
 *
 * Returns:
 *    0 on Success
 *
 * Side effects:
 *    None
 */
void
fc_host_free(struct Scsi_Host *shost)
{
   struct fc_host_attrs *fc_host;
   struct workqueue_struct *work_q;

   VMK_ASSERT(shost);
   fc_host = shost_to_fc_host(shost);
   VMK_ASSERT(fc_host);

   /*
    * Free up the WQ.  The pointer is cleared before destroy so that
    * late queueing attempts see a NULL queue rather than a freed one.
    */
   if (fc_host->work_q) {
      work_q = fc_host->work_q;
      fc_host->work_q = NULL;
      destroy_workqueue(work_q);
   }

   if (fc_host->devloss_work_q) {
      work_q = fc_host->devloss_work_q;
      fc_host->devloss_work_q = NULL;
      destroy_workqueue(work_q);
   }

   return;
}

/*
 * vmklnx_fc_host_free
 *
 * Frees up fc host information
 *
 * Results:
 *    None
 *
 * Side effects:
 *    None
 */
void
vmklnx_fc_host_free(struct Scsi_Host *shost)
{
   VMK_ASSERT(shost);

   fc_host_free(shost);
   FcLinuxReleaseMgmtAdapter(shost);

   return;
}

/*
 * vmklnx_fcoe_host_free
 *
 * Frees up fcoe host information
 *
 * Results:
 *    None
 *
 * Side effects:
 *    None
 */
void
vmklnx_fcoe_host_free(struct Scsi_Host *shost)
{
   VMK_ASSERT(shost);

   fc_host_free(shost);
   FcoeLinuxReleaseMgmtAdapter(shost);

   return;
}

/**
 * Called to terminate any fc_transport related elements
 * for a scsi host.
 * This routine is expected to be called immediately preceeding the
 * call from the driver to scsi_remove_host().
 * @shost: Pointer to struct Scsi_Host
 *
 * RETURN VALUE:
 * None
 *
 * ESX Deviation Notes:
 * Removes vports along with rports
 */
/* _VMKLNX_CODECHECK_: fc_remove_host */
void
fc_remove_host(struct Scsi_Host *shost)
{
   struct fc_vport *vport, *next_vport;
   struct fc_rport *rport, *next_rport;
   struct fc_host_attrs *fc_host;
   unsigned long flags;

   VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
   VMK_ASSERT(shost);
   fc_host = shost_to_fc_host(shost);
   VMK_ASSERT(fc_host);

   VMKLNX_DEBUG(0, "for %s", shost->hostt->name);

   /* Mark the host as going away so rport add/delete/rolechg bail out. */
   atomic_set(&fc_host->vmklnx_flag, VMKLNX_FC_HOST_REMOVING);

   spin_lock_irqsave(shost->host_lock, flags);

   /* Remove any vports */
   list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
      fc_queue_work(shost, &vport->vport_delete_work);
   }

   /* Remove any remote ports */
   list_for_each_entry_safe(rport, next_rport, &fc_host->rports, peers) {
      list_del(&rport->peers);
      rport->port_state = FC_PORTSTATE_DELETED;
      fc_queue_work(shost, &rport->rport_delete_work);
   }

   /* Remove any ports kept only for consistent-binding bookkeeping */
   list_for_each_entry_safe(rport, next_rport,
                            &fc_host->rport_bindings, peers) {
      list_del(&rport->peers);
      rport->port_state = FC_PORTSTATE_DELETED;
      fc_queue_work(shost, &rport->rport_delete_work);
   }

   spin_unlock_irqrestore(shost->host_lock, flags);

   /*
    * Flush all work items
    */
   fc_flush_devloss(shost);
   fc_flush_work(shost);

   /* flush all scan work items */
   scsi_flush_work(shost);

   /*
    * The WQ associated with this request will be removed during the
    * scsi_host_dev_release call
    */
   return;
}
EXPORT_SYMBOL(fc_remove_host);

/*
 * allocates and creates a remote FC port.
 *
 * Results:
 *    Pointer to new rport that is created
 *
 * Side effects:
 *    None
 *
 */
/* _VMKLNX_CODECHECK_: fc_rport_create*/
struct fc_rport *
fc_rport_create(struct Scsi_Host *shost, int channel,
                struct fc_rport_identifiers *ids)
{
   struct fc_internal *fci;
   struct fc_host_attrs *fc_host;
   struct fc_rport *rport;
   struct device *dev;
   unsigned long flags;
   size_t size;
   int error;

   VMK_ASSERT(shost);
   VMK_ASSERT(shost->transportt);
   VMK_ASSERT(ids);
   fc_host = shost_to_fc_host(shost);
   VMK_ASSERT(fc_host);
   fci = to_fc_internal(shost->transportt);

   /* One allocation covers the rport plus the LLDD's private area. */
   size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size);
   rport = (struct fc_rport *) kzalloc(size, GFP_KERNEL);
   if (unlikely(!rport)) {
      VMKLNX_WARN("allocation failure");
      return NULL;
   }

   VMKLNX_DEBUG(2, "Start");

   rport->maxframe_size = -1;
   rport->supported_classes = FC_COS_UNSPECIFIED;
   rport->dev_loss_tmo = fc_dev_loss_tmo;
   memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name));
   memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name));
   rport->port_id = ids->port_id;
   rport->roles = ids->roles;
   rport->port_state = FC_PORTSTATE_ONLINE;
   if (fci->f->dd_fcrport_size) {
      /* LLDD private data lives directly after the rport struct. */
      rport->dd_data = &rport[1];
   }
   rport->channel = channel;
   rport->fast_io_fail_tmo = -1;

   INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
   INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
   INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
   INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
   INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);

   spin_lock_irqsave(shost->host_lock, flags);

   rport->number = fc_host->next_rport_number++;
   if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) {
      rport->scsi_target_id = fc_host->next_target_id++;
   } else {
      rport->scsi_target_id = -1;
   }

   /*
    * Add the rport to the list
    */
   list_add_tail(&rport->peers, &fc_host->rports);
   get_device(&shost->shost_gendev);       /* for fc_host->rport list */

   spin_unlock_irqrestore(shost->host_lock, flags);

   /*
    * Fill in references for the parents. This is used by scsi_alloc_target
    */
   dev = &rport->dev;
   device_initialize(dev);
   dev->parent = get_device(&shost->shost_gendev); /* parent reference */
   dev->dev_type = FC_RPORT_TYPE;
   dev->release = fc_rport_dev_release;
   sprintf(dev->bus_id, "rport-%d:%d-%d",
           shost->host_no, channel, rport->number);

   error = device_add(dev);
   if (error) {
      VMKLNX_WARN("FC Remote Port device_add failed");
      /* Undo the list insertion and both device references taken above. */
      spin_lock_irqsave(shost->host_lock, flags);
      list_del(&rport->peers);
      put_device(&shost->shost_gendev);    /* for fc_host->rport list */
      spin_unlock_irqrestore(shost->host_lock, flags);
      put_device(dev->parent);
      kfree(rport);
      rport = NULL;
      goto exit_rport_create;
   }

   if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) {
      /* initiate a scan of the target */
      rport->flags |= FC_RPORT_SCAN_PENDING;
      vmk_AtomicInc64(&shost->pendingScanWorkQueueEntries);
      scsi_queue_work(shost, &rport->scan_work);
   }

exit_rport_create:
   VMKLNX_DEBUG(2, "End");
   return rport;
}

/**
 * notifies the fc transport of the existence
 * of a remote FC port.
 *
 * The LLDD calls this routine to notify the transport of the existence
 * of a remote port. The LLDD provides the unique identifiers (wwpn,wwn)
 * of the port, its FC address (port_id), and the FC4 roles that are
 * active for the port.
 *
 * For ports that are FCP targets (aka scsi targets), the FC transport
 * maintains consistent target id bindings on behalf of the LLDD.
* A consistent target id binding is an assignment of a target id to * a remote port identifier, which persists while the scsi host is * attached. The remote port can disappear, then later reappear, and * its target id assignment remains the same. This allows for shifts * in FC addressing (if binding by wwpn or wwnn) with no apparent * changes to the scsi subsystem which is based on scsi host number and * target id values. Bindings are only valid during the attachment of * the scsi host. If the host detaches, then later reattaches, target * id bindings may change. Whenever a remote port is allocated, a new * fc_remote_port class device is created. This routine should not be * called from interrupt context and assumes no locks are held on entry. * The routine will search the list of remote ports it maintains * internally on behalf of consistent target id mappings. If found, the * remote port structure will be reused. Otherwise, a new remote port * structure will be allocated. * @shost: scsi host the remote port is connected to * @channel: Channel on shost port connected to * @ids: The world wide names, fc address, and FC4 port * roles for the remote port * * RETURN VALUE: * Returns a remote port structure * **/ /* _VMKLNX_CODECHECK_: fc_remote_port_add */ struct fc_rport * fc_remote_port_add(struct Scsi_Host *shost, int channel, struct fc_rport_identifiers *ids) { struct fc_internal *fci; struct fc_rport *rport; unsigned long flags; int match = 0; struct fc_host_attrs *fc_host; VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); VMK_ASSERT(shost); VMK_ASSERT(shost->transportt); VMK_ASSERT(ids); fc_host = shost_to_fc_host(shost); VMK_ASSERT(fc_host); if (atomic_read(&fc_host->vmklnx_flag) == VMKLNX_FC_HOST_REMOVING) { VMKLNX_WARN("ERROR: FC host '%s' is being removed" "can not add rports now", shost->hostt->name); return NULL; } fci = to_fc_internal(shost->transportt); /* ensure any stgt delete functions are done */ fc_flush_work(shost); /* * Search the list of 
"active" rports, for an rport that has been * deleted, but we've held off the real delete while the target * is in a "blocked" state. */ spin_lock_irqsave(shost->host_lock, flags); list_for_each_entry(rport, &fc_host->rports, peers) { if ((rport->port_state == FC_PORTSTATE_BLOCKED) && (rport->channel == channel)) { switch (fc_host->tgtid_bind_type) { case FC_TGTID_BIND_BY_WWPN: case FC_TGTID_BIND_NONE: if (rport->port_name == ids->port_name) { match = 1; } break; case FC_TGTID_BIND_BY_WWNN: if (rport->node_name == ids->node_name) { match = 1; } break; case FC_TGTID_BIND_BY_ID: if (rport->port_id == ids->port_id) { match = 1; } break; } if (match) { struct delayed_work *work = &rport->dev_loss_work; memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name)); memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name)); rport->port_id = ids->port_id; rport->port_state = FC_PORTSTATE_ONLINE; rport->roles = ids->roles; spin_unlock_irqrestore(shost->host_lock, flags); if (fci->f->dd_fcrport_size) { memset(rport->dd_data, 0, fci->f->dd_fcrport_size); } VMKLNX_DEBUG(2, "Active Port. Target Id %s:%d", vmklnx_get_vmhba_name(shost), rport->scsi_target_id); /* * If we were blocked, we were a target. * If no longer a target, we leave the timer * running in case the port changes roles * prior to the timer expiring. If the timer * fires, the target will be torn down. */ if (!(ids->roles & FC_RPORT_ROLE_FCP_TARGET)) { return rport; } /* restart the target */ /* * Stop the target timers first. Take no action * on the del_timer failure as the state * machine state change will validate the * transaction. 
*/ if (!cancel_delayed_work(&rport->fail_io_work)) { fc_flush_devloss(shost); } if (!cancel_delayed_work(work)) { fc_flush_devloss(shost); } spin_lock_irqsave(shost->host_lock, flags); rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; /* initiate a scan of the target */ rport->flags |= FC_RPORT_SCAN_PENDING; vmk_AtomicInc64(&shost->pendingScanWorkQueueEntries); scsi_queue_work(shost, &rport->scan_work); spin_unlock_irqrestore(shost->host_lock, flags); scsi_target_unblock(&rport->dev); return rport; } } } /* Search the bindings array */ if (fc_host->tgtid_bind_type != FC_TGTID_BIND_NONE) { /* search for a matching consistent binding */ list_for_each_entry(rport, &fc_host->rport_bindings, peers) { if (rport->channel != channel) { continue; } VMKLNX_DEBUG(2, "In passive Queue. Target Id %s:%d", vmklnx_get_vmhba_name(shost), rport->scsi_target_id); switch (fc_host->tgtid_bind_type) { case FC_TGTID_BIND_BY_WWPN: if (rport->port_name == ids->port_name) { match = 1; } break; case FC_TGTID_BIND_BY_WWNN: if (rport->node_name == ids->node_name) { match = 1; } break; case FC_TGTID_BIND_BY_ID: if (rport->port_id == ids->port_id) { match = 1; } break; case FC_TGTID_BIND_NONE: /* to keep compiler happy */ break; } if (match) { list_move_tail(&rport->peers, &fc_host->rports); break; } } if (match) { memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name)); memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name)); rport->port_id = ids->port_id; rport->roles = ids->roles; rport->port_state = FC_PORTSTATE_ONLINE; if (fci->f->dd_fcrport_size) { memset(rport->dd_data, 0, fci->f->dd_fcrport_size); } if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) { /* initiate a scan of the target */ rport->flags |= FC_RPORT_SCAN_PENDING; vmk_AtomicInc64(&shost->pendingScanWorkQueueEntries); scsi_queue_work(shost, &rport->scan_work); spin_unlock_irqrestore(shost->host_lock, flags); scsi_target_unblock(&rport->dev); } else { spin_unlock_irqrestore(shost->host_lock, flags); } 
VMKLNX_DEBUG(2, "Passive Port"); return rport; } } spin_unlock_irqrestore(shost->host_lock, flags); /* No consistent binding found - create new remote port entry */ rport = fc_rport_create(shost, channel, ids); vmk_ScsiAdapterEvent(((struct vmklnx_ScsiAdapter *)shost->adapter)->vmkAdapter, VMK_SCSI_ADAPTER_EVENT_FC_NEW_TARGET); return rport; } EXPORT_SYMBOL(fc_remote_port_add); /** * notifies the fc transport that a remote * port is no longer in existence * * The LLDD calls this routine to notify the transport that a remote * port is no longer part of the topology. Although a port * may no longer be part of the topology, it may persist in the remote * ports displayed by the fc_host. This is done under 2 conditions. First, * if the port was a scsi target, we delay its deletion by "blocking" it. * This allows the port to temporarily disappear, then reappear without * disrupting the SCSI device tree attached to it. During the "blocked" * period the port will still exist. * Second, if the port was a scsi target and disappears for longer than we * expect, we'll delete the port and the tear down the SCSI device tree * attached to it. However, we want to semi-persist the target id assigned * to that port if it eventually does exist. The port structure will * remain (although with minimal information) so that the target id * bindings remails. * * If the remote port is not an FCP Target, it will be fully torn down * and deallocated, including the fc_remote_port class device. * * If the remote port is an FCP Target, the port will be placed in a * temporary blocked state. From the LLDD's perspective, the rport no * longer exists. From the SCSI midlayer's perspective, the SCSI target * exists, but all sdevs on it are blocked from further I/O. We can then * expect the following two conditions. 
First, if the remote port does not * return (signaled by a LLDD call to fc_remote_port_add()) * within the dev_loss_tmo timeout, then the scsi target is removed, * thereby killing all outstanding i/o and removing the * scsi devices attached ot it. The port structure will be marked Not * Present and be partially cleared, leaving only enough information to * recognize the remote port relative to the scsi target id binding if * it later appears. The port will remain as long as there is a valid * binding (e.g. until the user changes the binding type or unloads the * scsi host with the binding). * * Second, if the remote port returns within the dev_loss_tmo value * (and matches according to the target id binding type), * the port structure will be reused. If it is no longer a SCSI target, * the target will be torn down. If it continues to be a SCSI target, * then the target will be unblocked (allowing i/o to be resumed), * and a scan will be activated to ensure that all luns are detected. * * This function cannot be called from interrupt context and assumes no * locks are held on entry. * @rport: The remote port that no longer exists * * RETURN VALUE: * None * * ESX Deviation Notes: * The link timeout can be set by the driver in * vmkAdapter->mgmtAdapter.t.fc->linkTimeout. * The lesser of fast IO fail timeout of the rport and the * link timeout is selected as the time period to wait before * the rport is freed. 
**/ /* _VMKLNX_CODECHECK_: fc_remote_port_delete */ void fc_remote_port_delete(struct fc_rport *rport) { struct Scsi_Host *shost; struct fc_internal *i; int timeout; unsigned long flags; struct vmklnx_ScsiAdapter *vmklnx26ScsiAdapter; vmk_ScsiAdapter *vmkAdapter; struct fc_host_attrs *fc_host; VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); VMK_ASSERT(rport); shost = rport_to_shost(rport); VMK_ASSERT(shost); VMK_ASSERT(shost->transportt); fc_host = shost_to_fc_host(shost); VMK_ASSERT(fc_host); if (atomic_read(&fc_host->vmklnx_flag) == VMKLNX_FC_HOST_REMOVING) { VMKLNX_WARN("ERROR: FC host '%s' is being removed" "rports will be deleted as part of host removal", shost->hostt->name); return; } i = to_fc_internal(shost->transportt); VMK_ASSERT(i); vmklnx26ScsiAdapter = (struct vmklnx_ScsiAdapter *) shost->adapter; VMK_ASSERT(vmklnx26ScsiAdapter); vmkAdapter = vmklnx26ScsiAdapter->vmkAdapter; VMK_ASSERT(vmkAdapter); VMK_ASSERT((vmkAdapter->mgmtAdapter.transport == VMK_STORAGE_ADAPTER_FC) || (vmkAdapter->mgmtAdapter.transport == VMK_STORAGE_ADAPTER_FCOE)); VMKLNX_DEBUG(2, "----"); /* * No need to flush the fc_host work_q's, as all adds are synchronous. * * We do need to reclaim the rport scan work element, so eventually * (in fc_rport_final_delete()) we'll flush the scsi host work_q if * there's still a scan pending. */ spin_lock_irqsave(shost->host_lock, flags); /* If no scsi target id mapping, delete it */ if (rport->scsi_target_id == -1) { list_del(&rport->peers); rport->port_state = FC_PORTSTATE_DELETED; fc_queue_work(shost, &rport->rport_delete_work); spin_unlock_irqrestore(shost->host_lock, flags); VMKLNX_DEBUG(2, "Not a scsi target.. deleting----"); return; } rport->port_state = FC_PORTSTATE_BLOCKED; rport->flags |= FC_RPORT_DEVLOSS_PENDING; spin_unlock_irqrestore(shost->host_lock, flags); scsi_target_block(&rport->dev); /* * If the user has set this value, use it. 
If not, * use the value set by vmklinux or the FC driver */ if (unlikely(vmkAdapter->mgmtAdapter.transport == VMK_STORAGE_ADAPTER_FCOE)) { timeout = vmkAdapter->mgmtAdapter.t.fcoe->fc.linkTimeout ? vmkAdapter->mgmtAdapter.t.fcoe->fc.linkTimeout : rport->dev_loss_tmo; } else { timeout = vmkAdapter->mgmtAdapter.t.fc->linkTimeout ? vmkAdapter->mgmtAdapter.t.fc->linkTimeout : rport->dev_loss_tmo; } /* see if we need to kill io faster than waiting for device loss */ if ((rport->fast_io_fail_tmo != -1) && (rport->fast_io_fail_tmo < timeout) && (i->f->terminate_rport_io)) fc_queue_devloss_work(shost, &rport->fail_io_work, rport->fast_io_fail_tmo * HZ); /* cap the length the devices can be blocked until they are deleted */ if (fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ) != 1) { VMKLNX_DEBUG(2, "Failed to queue devloss----"); } else { VMKLNX_DEBUG(2, "Link Timeout = %d, Target Id %s:%d", timeout, vmklnx_get_vmhba_name(shost), rport->scsi_target_id); } vmk_ScsiAdapterEvent(((struct vmklnx_ScsiAdapter *)shost->adapter)->vmkAdapter, VMK_SCSI_ADAPTER_EVENT_FC_REMOVED_TARGET); } EXPORT_SYMBOL(fc_remote_port_delete); /** * notifies the fc transport that the roles * on a remote may have changed. * * The LLDD calls this routine to notify the transport that the roles * on a remote port may have changed. The largest effect of this is * if a port now becomes a FCP Target, it must be allocated a * scsi target id. If the port is no longer a FCP target, any * scsi target id value assigned to it will persist in case the * role changes back to include FCP Target. No changes in the scsi * midlayer will be invoked if the role changes (in the expectation * that the role will be resumed. If it doesn't normal error processing * will take place). * * Should not be called from interrupt context. 
* * @rport: The remote port that changed * @roles: Private (Transport-managed) Attribute * * * Notes: * This routine assumes no locks are held on entry * * RETURN VALUE: * None * * ESX Deviation Notes: * If the FC host is being removed, we do not change roles, * but return immediately **/ /* _VMKLNX_CODECHECK_: fc_remote_port_rolechg */ void fc_remote_port_rolechg(struct fc_rport *rport, u32 roles) { struct Scsi_Host *shost; struct fc_host_attrs *fc_host; unsigned long flags; int create = 0; VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); VMK_ASSERT(rport); shost = rport_to_shost(rport); VMK_ASSERT(shost); fc_host = shost_to_fc_host(shost); VMK_ASSERT(fc_host); if (atomic_read(&fc_host->vmklnx_flag) == VMKLNX_FC_HOST_REMOVING) { VMKLNX_WARN("ERROR: FC host '%s' is being removed" "can not role change now", shost->hostt->name); return; } spin_lock_irqsave(shost->host_lock, flags); if (roles & FC_RPORT_ROLE_FCP_TARGET) { if (rport->scsi_target_id == -1) { rport->scsi_target_id = fc_host->next_target_id++; create = 1; } else if (!(rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { create = 1; } } rport->roles = roles; spin_unlock_irqrestore(shost->host_lock, flags); if (create) { /* * There may have been a delete timer running on the * port. Ensure that it is cancelled as we now know * the port is an FCP Target. * Note: we know the rport is exists and in an online * state as the LLDD would not have had an rport * reference to pass us. * * Take no action on the del_timer failure as the state * machine state change will validate the * transaction. 
*/ if (!cancel_delayed_work(&rport->fail_io_work)) { fc_flush_devloss(shost); } if (!cancel_delayed_work(&rport->dev_loss_work)) { fc_flush_devloss(shost); } spin_lock_irqsave(shost->host_lock, flags); rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; spin_unlock_irqrestore(shost->host_lock, flags); /* ensure any stgt delete functions are done */ fc_flush_work(shost); /* initiate a scan of the target */ spin_lock_irqsave(shost->host_lock, flags); rport->flags |= FC_RPORT_SCAN_PENDING; vmk_AtomicInc64(&shost->pendingScanWorkQueueEntries); scsi_queue_work(shost, &rport->scan_work); spin_unlock_irqrestore(shost->host_lock, flags); scsi_target_unblock(&rport->dev); } } EXPORT_SYMBOL(fc_remote_port_rolechg); /* * fc_timeout_deleted_rport - Timeout handler for a deleted remote port that * was a SCSI target (thus was blocked), and failed * to return in the alloted time. * * Results: * None * * Side effects: * None */ static void fc_timeout_deleted_rport(struct work_struct *work) { struct fc_rport *rport; struct Scsi_Host *shost; struct fc_host_attrs *fc_host; unsigned long flags; rport = container_of(work, struct fc_rport, dev_loss_work.work); shost = rport_to_shost(rport); VMK_ASSERT(shost); fc_host = shost_to_fc_host(shost); VMK_ASSERT(fc_host); VMKLNX_DEBUG(2, "----"); spin_lock_irqsave(shost->host_lock, flags); rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; /* * If the port is ONLINE, then it came back. Validate it's still an * FCP target. If not, tear down the scsi_target on it. 
*/ if ((rport->port_state == FC_PORTSTATE_ONLINE) && !(rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { dev_printk(KERN_ERR, &rport->dev, "blocked FC remote port time out: no longer" " a FCP target, removing starget\n"); spin_unlock_irqrestore(shost->host_lock, flags); scsi_target_unblock(&rport->dev); fc_queue_work(shost, &rport->stgt_delete_work); return; } if (rport->port_state != FC_PORTSTATE_BLOCKED) { spin_unlock_irqrestore(shost->host_lock, flags); dev_printk(KERN_ERR, &rport->dev, "blocked FC remote port time out: leaving target alone\n"); return; } if (fc_host->tgtid_bind_type == FC_TGTID_BIND_NONE) { list_del(&rport->peers); rport->port_state = FC_PORTSTATE_DELETED; dev_printk(KERN_ERR, &rport->dev, "blocked FC remote port time out: removing target\n"); fc_queue_work(shost, &rport->rport_delete_work); spin_unlock_irqrestore(shost->host_lock, flags); return; } if (fc_remove_on_dev_loss) { dev_printk(KERN_ERR, &rport->dev, "blocked FC remote port time out: removing target and " "saving binding\n"); } else { dev_printk(KERN_ERR, &rport->dev, "blocked FC remote port time out: saving binding\n"); } list_move_tail(&rport->peers, &fc_host->rport_bindings); /* * Note: We do not remove or clear the hostdata area. This allows * host-specific target data to persist along with the * scsi_target_id. It's up to the host to manage it's hostdata area. */ /* * Reinitialize port attributes that may change if the port comes back. 
*/ rport->maxframe_size = -1; rport->supported_classes = FC_COS_UNSPECIFIED; rport->roles = FC_RPORT_ROLE_UNKNOWN; rport->port_state = FC_PORTSTATE_NOTPRESENT; /* remove the identifiers that aren't used in the consisting binding */ switch (fc_host->tgtid_bind_type) { case FC_TGTID_BIND_BY_WWPN: rport->node_name = -1; rport->port_id = -1; break; case FC_TGTID_BIND_BY_WWNN: rport->port_name = -1; rport->port_id = -1; break; case FC_TGTID_BIND_BY_ID: rport->node_name = -1; rport->port_name = -1; break; case FC_TGTID_BIND_NONE: /* to keep compiler happy */ break; } /* * As this only occurs if the remote port (scsi target) * went away and didn't come back - we'll remove * all attached scsi devices. */ spin_unlock_irqrestore(shost->host_lock, flags); scsi_target_unblock(&rport->dev); fc_queue_work(shost, &rport->stgt_delete_work); } /* * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a * disconnected SCSI target. * * Results: * None * * Side effects: * None */ static void fc_timeout_fail_rport_io(struct work_struct *work) { struct fc_rport *rport; struct Scsi_Host *shost; struct fc_internal *i; rport = container_of(work, struct fc_rport, fail_io_work.work); shost = rport_to_shost(rport); VMK_ASSERT(shost); VMK_ASSERT(shost->transportt); i = to_fc_internal(shost->transportt); VMK_ASSERT(i); VMKLNX_DEBUG(2, "----"); if (rport->port_state != FC_PORTSTATE_BLOCKED) { return; } VMKAPI_MODULE_CALL_VOID(SCSI_GET_MODULE_ID(shost), i->f->terminate_rport_io, rport); } /* * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. 
* * Results: * None * * Side effects: * None */ static void fc_scsi_scan_rport(struct work_struct *work) { struct fc_rport *rport; struct Scsi_Host *shost; unsigned long flags; rport = container_of(work, struct fc_rport, scan_work); shost = rport_to_shost(rport); VMK_ASSERT(shost); if ((rport->port_state == FC_PORTSTATE_ONLINE) && (rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { scsi_scan_target(&rport->dev, rport->channel, rport->scsi_target_id, SCAN_WILD_CARD, 1); } spin_lock_irqsave(shost->host_lock, flags); rport->flags &= ~FC_RPORT_SCAN_PENDING; vmk_AtomicDec64(&shost->pendingScanWorkQueueEntries); spin_unlock_irqrestore(shost->host_lock, flags); } /** * scsi_is_fc_rport - Check if the scsi device is fc rport * @dev: scsi device * * Check if the scsi device is fc rport * * RETURN VALUE: * TRUE if the device is a fc rport, FALSE otherwise */ /* _VMKLNX_CODECHECK_: scsi_is_fc_rport */ int scsi_is_fc_rport(const struct device *dev) { VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); return dev->dev_type == FC_RPORT_TYPE; } EXPORT_SYMBOL(scsi_is_fc_rport); /** * fc_get_event_number - obtain the next sequential FC event number * * Returns the next sequential FC event number * * RETURN VALUE: * The next sequential FC event number */ /* _VMKLNX_CODECHECK_: fc_get_event_number */ u32 fc_get_event_number(void) { VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); return atomic_add_return(1, &fc_event_seq); } EXPORT_SYMBOL(fc_get_event_number); /** * This function is a non-operational function provided to help reduce * kernel ifdefs. It is not supported in this release of ESX. * @shost: ignored * @event_number: ignored * @data_len: ignored * @data_buf: ignored * @vendor_id: ignored * * ESX Deviation Notes: * This function is a non-operational function provided to help reduce * kernel ifdefs. It is not supported in this release of ESX. 
 *
 * RETURN VALUE:
 * This function does not return a value
 */
/* _VMKLNX_CODECHECK_: fc_host_post_vendor_event */
void
fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
                          u32 data_len, char * data_buf, u64 vendor_id)
{
   VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
   VMK_ASSERT(shost);
   /* Intentional no-op: vmklinux has no netlink transport for FC events. */
   VMKLNX_DEBUG(5, "Netlink is not supported in vmklinux "
                "So no notifications were sent");
   return;
}
EXPORT_SYMBOL(fc_host_post_vendor_event);

/**
 * This function is not implemented.
 *
 * This function is a non-operational function provided to help reduce
 * kernel ifdefs. It is not supported in this release of ESX.
 * @shost: ignored
 * @event_number: ignored
 * @event_code: ignored
 * @event_data: ignored
 *
 * RETURN VALUE:
 * This function does not return a value
 *
 * ESX Deviation Notes:
 * This function is a non-operational function provided to help reduce
 * kernel ifdefs. It is not supported in this release of ESX.
 */
/* _VMKLNX_CODECHECK_: fc_host_post_event */
void
fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
                   enum fc_host_event_code event_code, u32 event_data)
{
   VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
   VMK_ASSERT(shost);
   /* Intentional no-op: vmklinux has no netlink transport for FC events. */
   VMKLNX_DEBUG(5, "Netlink is not supported in vmklinux "
                "So no notifications were sent up [%x]", event_code);
   return;
}
EXPORT_SYMBOL(fc_host_post_event);

/*
 * fc_queue_work --
 *
 *    Queue work to the fc_host workqueue.
 *
 * Results:
 *    1 - work queued for execution
 *    0 - work is already queued
 *    -EINVAL - work queue doesn't exist
 *
 * Side effects:
 *    None
 */
static int
fc_queue_work(struct Scsi_Host *shost, struct work_struct *work)
{
   VMK_ASSERT(shost);
   VMK_ASSERT(work);

   /*
    * Guard against drivers queueing work before fc_host setup created
    * the per-host workqueue (or after it was torn down).
    */
   if (unlikely(!fc_host_work_q(shost))) {
      VMKLNX_WARN("ERROR: FC host '%s' attempted to queue work, "
                  "when no workqueue created.", shost->hostt->name);
      return -EINVAL;
   }

   /* queue_work returns 1 if queued, 0 if the work was already pending. */
   return queue_work(fc_host_work_q(shost), work);
}

/*
 * fc_flush_work --
 *
 *    Flush a fc_host's workqueue.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 */
static void
fc_flush_work(struct Scsi_Host *shost)
{
   VMK_ASSERT(shost);

   /* Nothing to flush if the per-host workqueue was never created. */
   if (!fc_host_work_q(shost)) {
      VMKLNX_WARN("ERROR: FC host '%s' attempted to flush work, "
                  "when no workqueue created.", shost->hostt->name);
      return;
   }

   /* Blocks until every item queued on the fc_host workqueue completes. */
   flush_workqueue(fc_host_work_q(shost));
}

/*
 * fc_queue_devloss_work --
 *
 *    Schedule work for the fc_host devloss workqueue.
 *
 * Results:
 *    1 on success / 0 already queued / < 0 for error
 *
 * Side effects:
 *    None
 */
static int
fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *dwork,
                      unsigned long delay)
{
   VMK_ASSERT(shost);
   VMK_ASSERT(dwork);

   /* The devloss workqueue is separate from the regular fc_host queue. */
   if (unlikely(!fc_host_devloss_work_q(shost))) {
      VMKLNX_WARN("ERROR: FC host '%s' attempted to queue work, "
                  "when no workqueue created.", shost->hostt->name);
      return -EINVAL;
   }

#if defined(VMX86_DEBUG)
   if (delay != 0) {
      /*
       * Log the delay
       */
      VMKLNX_DEBUG(2, "Delayed work = 0x%"VMK_FMT64"x", delay);
   }
#endif /* defined(VMX86_DEBUG) */

   /* delay is in jiffies; 0 means run as soon as a worker is available. */
   return queue_delayed_work(fc_host_devloss_work_q(shost), dwork, delay);
}

/*
 * fc_flush_devloss ---
 *
 *    Flush a fc_host's devloss workqueue.
* * Results: * None * * Side effects: * None */ static void fc_flush_devloss(struct Scsi_Host *shost) { VMK_ASSERT(shost); if (!fc_host_devloss_work_q(shost)) { VMKLNX_WARN("ERROR: FC host '%s' attempted to flush work, " "when no workqueue created.", shost->hostt->name); return; } flush_workqueue(fc_host_devloss_work_q(shost)); } /* * fc_starget_delete --- * * Called to delete the scsi decendents of an rport (target and all sdevs) * * Results: * None * * Side effects: * None */ static void fc_starget_delete(struct work_struct *work) { struct fc_rport *rport; struct Scsi_Host *shost; unsigned long flags; struct fc_internal *i; struct fc_host_attrs *fc_host; rport = container_of(work, struct fc_rport, stgt_delete_work); shost = rport_to_shost(rport); VMK_ASSERT(shost); VMK_ASSERT(shost->transportt); fc_host = shost_to_fc_host(shost); i = to_fc_internal(shost->transportt); VMK_ASSERT(i); /* * Involve the LLDD if possible. All io on the rport is to * be terminated, either as part of the dev_loss_tmo callback * processing, or via the terminate_rport_io function. 
*/ if (i->f->dev_loss_tmo_callbk) { VMKAPI_MODULE_CALL_VOID(SCSI_GET_MODULE_ID(shost), i->f->dev_loss_tmo_callbk, rport); } else if (i->f->terminate_rport_io) { VMKAPI_MODULE_CALL_VOID(SCSI_GET_MODULE_ID(shost), i->f->terminate_rport_io, rport); } spin_lock_irqsave(shost->host_lock, flags); if (rport->flags & FC_RPORT_DEVLOSS_PENDING) { spin_unlock_irqrestore(shost->host_lock, flags); if (!cancel_delayed_work(&rport->fail_io_work)) { fc_flush_devloss(shost); } if (!cancel_delayed_work(&rport->dev_loss_work)) { fc_flush_devloss(shost); } spin_lock_irqsave(shost->host_lock, flags); rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; } spin_unlock_irqrestore(shost->host_lock, flags); /* * Try to remove the target when fc_host is in * the process of being released */ if ((fc_remove_on_dev_loss) || (atomic_read(&fc_host->vmklnx_flag) == VMKLNX_FC_HOST_REMOVING)) { scsi_remove_target(&rport->dev); } else { VMKLNX_DEBUG(2, "Marking the target offline"); vmklnx_scsi_target_offline(&rport->dev); } } /* * fc_rport_final_delete --- * finish rport termination and delete it. * * Results: * None * * Side effects: * None */ static void fc_rport_final_delete(struct work_struct *work) { struct fc_rport *rport; struct Scsi_Host *shost; struct fc_internal *i; struct device *dev; rport = container_of(work, struct fc_rport, rport_delete_work); shost = rport_to_shost(rport); VMK_ASSERT(shost); VMK_ASSERT(shost->transportt); i = to_fc_internal(shost->transportt); VMK_ASSERT(i); dev = &rport->dev; /* * if a scan is pending, flush the SCSI Host work_q so that * that we can reclaim the rport scan work element. 
*/ if (rport->flags & FC_RPORT_SCAN_PENDING) { scsi_flush_work(shost); } /* Delete SCSI target and sdevs */ if (rport->scsi_target_id != -1) { fc_starget_delete(&rport->stgt_delete_work); } else if (i->f->dev_loss_tmo_callbk) { VMKAPI_MODULE_CALL_VOID(SCSI_GET_MODULE_ID(shost), i->f->dev_loss_tmo_callbk, rport); } else if (i->f->terminate_rport_io) { VMKAPI_MODULE_CALL_VOID(SCSI_GET_MODULE_ID(shost), i->f->terminate_rport_io, rport); } device_del(dev); put_device(&shost->shost_gendev); /* for fc_host->rport list */ put_device(dev); /* for self-reference */ } /* * NPIV support code */ /* * vmk_fc_vport_create - allocates and creates a FC virtual port. * Allocates and creates the vport structure, calls the parent host * to instantiate the vport, the completes w/ class and sysfs creation. * @shost: physical host to create vport on * @pdev: parent device * @vport_shost: pointer to callers vport_shost * * Notes: * This routine assumes no locks are held on entry. **/ int vmk_fc_vport_create(struct Scsi_Host *shost, struct device *pdev, void *data, void **vport_shost) { struct fc_host_attrs *fc_host; struct fc_internal *fci; struct fc_vport *vport; struct device *dev; struct vmk_ScsiVportArgs *args; unsigned long flags; size_t size; int error; VMK_ASSERT(shost); VMK_ASSERT(pdev); args = (vmk_ScsiVportArgs *) data; VMK_ASSERT(args); fc_host = shost_to_fc_host(shost); VMK_ASSERT(fc_host); fci = to_fc_internal(shost->transportt); VMK_ASSERT(fci); if ( ! 
fci->f->vport_create) { return -ENOENT; } size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size); vport = (struct fc_vport *)kzalloc(size, GFP_KERNEL); if (unlikely(!vport)) { VMKLNX_DEBUG(2, "allocation failure"); return -ENOMEM; } vport->vport_state = FC_VPORT_UNKNOWN; vport->vport_last_state = FC_VPORT_UNKNOWN; vport->node_name = wwn_to_u64(args->wwnn); vport->port_name = wwn_to_u64(args->wwpn); // we are always an initiator for now vport->roles = FC_PORT_ROLE_FCP_INITIATOR; vport->vport_type = FC_PORTTYPE_NPIV; if (fci->f->dd_fcvport_size) { vport->dd_data = &vport[1]; } vport->shost = shost; vport->channel = 0; vport->flags = FC_VPORT_CREATING; if (args->flags & VMK_SCSI_VPORT_FLAG_LEGACY) { vport->flags |= FC_VPORT_LEGACY; } INIT_WORK(&vport->vport_delete_work, fc_vport_sched_delete); spin_lock_irqsave(shost->host_lock, flags); if (fc_host->npiv_vports_inuse >= fc_host->max_npiv_vports) { spin_unlock_irqrestore(shost->host_lock, flags); kfree(vport); return -ENOSPC; } fc_host->npiv_vports_inuse++; vport->number = fc_host->next_vport_number++; list_add_tail(&vport->peers, &fc_host->vports); get_device(&shost->shost_gendev); /* for fc_host->vport list */ spin_unlock_irqrestore(shost->host_lock, flags); dev = &vport->dev; device_initialize(dev); /* takes self reference */ dev->parent = get_device(pdev); /* takes parent reference */ dev->dev_type = FC_VPORT_TYPE; dev->release = fc_vport_dev_release; sprintf(dev->bus_id, "vport-%d:%d-%d", shost->host_no, 0, vport->number); error = device_add(dev); if (error) { VMKLNX_DEBUG(0, "FC Virtual Port device_add failed"); goto delete_vport; } /* * call the driver to do the actual virtual port create * this in turn will create a Scsi_Host struct to * represent this vport */ VMKAPI_MODULE_CALL(SCSI_GET_MODULE_ID(shost), error, fci->f->vport_create, vport, FALSE); if (error) { VMKLNX_DEBUG(0, "FC Virtual Port LLDD Create failed"); goto delete_vport_all; } spin_lock_irqsave(shost->host_lock, flags); vport->flags &= 
~FC_VPORT_CREATING; spin_unlock_irqrestore(shost->host_lock, flags); VMKLNX_DEBUG(0, "%s created via shost%d channel %d", dev->bus_id, shost->host_no, 0); // need to return the vport shost pointer *vport_shost = vport->vhost; vmk_ScsiAdapterEvent(((struct vmklnx_ScsiAdapter *)shost->adapter)->vmkAdapter, VMK_SCSI_ADAPTER_EVENT_FC_NEW_VPORT); return 0; delete_vport_all: device_del(dev); delete_vport: spin_lock_irqsave(shost->host_lock, flags); list_del(&vport->peers); put_device(&shost->shost_gendev); /* for fc_host->vport list */ fc_host->npiv_vports_inuse--; spin_unlock_irqrestore(shost->host_lock, flags); put_device(dev->parent); kfree(vport); return error; } int vmk_fc_vport_delete(struct Scsi_Host *shost) { struct fc_vport *vport; VMK_ASSERT(shost); if (shost->shost_gendev.parent->dev_type != FC_VPORT_TYPE) { return -ENOENT; } vport = dev_to_vport(shost->shost_gendev.parent); VMK_ASSERT(vport); return (vmk_fc_vport_terminate(vport)); } /** * fc_vport_sched_delete - workq-based delete request for a vport * * @work: vport to be deleted. **/ static void fc_vport_sched_delete(struct work_struct *work) { struct fc_vport *vport; int stat; vport = container_of(work, struct fc_vport, vport_delete_work); VMK_ASSERT(vport); stat = vmk_fc_vport_terminate(vport); if (stat) { VMKLNX_DEBUG(0, "%s could not be deleted created via " "shost%d channel %d - error %d", vport->dev.bus_id, vport->shost->host_no, vport->channel, stat); } } /* * Calls the LLDD vport_delete() function, then deallocates and removes * the vport from the shost and object tree. * * Notes: * This routine assumes no locks are held on entry. 
**/ static int vmk_fc_vport_terminate(struct fc_vport *vport) { struct Scsi_Host *shost; struct fc_host_attrs *fc_host; struct fc_internal *i; struct device *dev; unsigned long flags; int stat; VMK_ASSERT(vport); shost = vport_to_shost(vport); VMK_ASSERT(shost); fc_host = shost_to_fc_host(shost); VMK_ASSERT(fc_host); i = to_fc_internal(shost->transportt); VMK_ASSERT(i); dev = &vport->dev; VMK_ASSERT(dev); spin_lock_irqsave(shost->host_lock, flags); if (vport->flags & FC_VPORT_CREATING) { spin_unlock_irqrestore(shost->host_lock, flags); return -EBUSY; } if (vport->flags & (FC_VPORT_DEL)) { spin_unlock_irqrestore(shost->host_lock, flags); return -EALREADY; } vport->flags |= FC_VPORT_DELETING; spin_unlock_irqrestore(shost->host_lock, flags); if (i->f->vport_delete) { VMKAPI_MODULE_CALL(SCSI_GET_MODULE_ID(shost), stat, i->f->vport_delete, vport); } else { stat = -ENOENT; } spin_lock_irqsave(shost->host_lock, flags); vport->flags &= ~FC_VPORT_DELETING; if (!stat) { vport->flags |= FC_VPORT_DELETED; list_del(&vport->peers); fc_host->npiv_vports_inuse--; put_device(&shost->shost_gendev); /* for fc_host->vport list */ } spin_unlock_irqrestore(shost->host_lock, flags); if (stat) return stat; device_del(dev); /* * Removing our self-reference should mean our * release function gets called, which will drop the remaining * parent reference and free the data structure. 
*/ put_device(dev); /* for self-reference */ vmk_ScsiAdapterEvent(((struct vmklnx_ScsiAdapter *)shost->adapter)->vmkAdapter, VMK_SCSI_ADAPTER_EVENT_FC_REMOVED_VPORT); return 0; /* SUCCESS */ } /* * vmk_fc_vport_getinfo * */ int vmk_fc_vport_getinfo(struct Scsi_Host *shost, void *info) { struct vmk_VportInfo *pi; struct fc_host_attrs *fc_host; struct fc_vport *vport; struct fc_internal *i; VMK_ASSERT(shost); if ((struct vmklnx_ScsiModule *)shost->transportt->module == NULL || ((struct vmklnx_ScsiModule *)shost->transportt->module)->transportType != VMKLNX_SCSI_TRANSPORT_TYPE_FC ) { VMKLNX_DEBUG(2, "vmk_fc_vport_getinfo: not supported: shost=%p", shost); return -ENOENT; } i = to_fc_internal(shost->transportt); VMK_ASSERT(i); fc_host = shost_to_fc_host(shost); VMK_ASSERT(fc_host); if (fc_host->port_type == FC_PORTTYPE_UNKNOWN) { if (i->f && i->f->get_host_port_type) { VMKAPI_MODULE_CALL_VOID(SCSI_GET_MODULE_ID(shost), i->f->get_host_port_type, shost); } /* If port type is still unknown, bail out */ if (fc_host->port_type == FC_PORTTYPE_UNKNOWN) { VMKLNX_DEBUG(2, "FC port type is unknown shost=%p", shost); return -ENOENT; } } #if 0 /* * make sure this is not a vport before we check for vport capability */ if (i->f && (fc_host->port_type != FC_PORTTYPE_NPIV && (! 
i->f->vport_create))) { VMKLNX_DEBUG(2, "vmk_fc_vport_getinfo: not supported2:" " shost=%p, i=%p, f=%p", shost, i, i->f); return -ENOENT; } #endif VMK_ASSERT(info); pi = (struct vmk_VportInfo *) info; VMKLNX_DEBUG(2, "vmk_fc_vport_getinfo: shost=%p, info=%p", shost, info); /* * If this is not a physical Host, fill in something else */ if (fc_host->port_type == FC_PORTTYPE_NPIV) { vport = dev_to_vport(&shost->shost_gendev); VMK_ASSERT(vport); pi->linktype = VMK_VPORT_TYPE_VIRTUAL; pi->vports_max = VMK_VPORT_CNT_INVALID; pi->vports_inuse = VMK_VPORT_CNT_INVALID; u64_to_wwn(vport->node_name, pi->node_name); u64_to_wwn(vport->port_name, pi->port_name); goto check_state; } VMKLNX_DEBUG(2, "vmk_fc_vport_getinfo: physical port: shost=%p, info=%p", shost, info); /* * must be a physical port * just fill in the important blanks */ pi->vports_max = fc_host->max_npiv_vports; /* * PR 266660 - some drivers set max_npiv_vports * to VMK_VPORT_CNT_INVALID from a u16. * max_npiv_vports is a u32 and so is vports_max. 
*/ if (pi->vports_max == 0xffff) { VMKLNX_DEBUG(0, "vmk_fc_vport_getinfo: PR 266660: vports_max=%x", pi->vports_max); pi->vports_max = VMK_VPORT_CNT_INVALID; } pi->vports_inuse = fc_host->npiv_vports_inuse; pi->linktype = VMK_VPORT_TYPE_PHYSICAL; pi->fail_reason = VMK_VPORT_FAIL_UNKNOWN; pi->prev_fail_reason = VMK_VPORT_FAIL_UNKNOWN; u64_to_wwn(fc_host->node_name, pi->node_name); u64_to_wwn(fc_host->port_name, pi->port_name); /* * check the state */ check_state: if (i->f->get_host_port_state) { VMKAPI_MODULE_CALL_VOID(SCSI_GET_MODULE_ID(shost), i->f->get_host_port_state, shost); } switch (fc_host->port_state) { case FC_PORTSTATE_OFFLINE: pi->state = VMK_VPORT_STATE_OFFLINE; break; case FC_PORTSTATE_ONLINE: pi->state = VMK_VPORT_STATE_ACTIVE; break; case FC_PORTSTATE_NOTPRESENT: case FC_PORTSTATE_BLOCKED: default: pi->state = VMK_VPORT_STATE_FAILED; break; } return 0; } /* * vmk_fc_vport_suspend * Suspend/Resume vport = Disable/Enable vport */ int vmk_fc_vport_suspend(struct Scsi_Host *shost, int suspend) { struct fc_vport *vport; int error; struct fc_host_attrs *fc_host; struct fc_internal *fci; bool disable; VMK_ASSERT(shost); fc_host = shost_to_fc_host(shost); VMK_ASSERT(fc_host); fci = to_fc_internal(shost->transportt); VMK_ASSERT(fci); if (!fci->f->vport_disable) { return -ENOENT; } if (shost->shost_gendev.parent->dev_type != FC_VPORT_TYPE) { return -ENOENT; } vport = dev_to_vport(shost->shost_gendev.parent); VMK_ASSERT(vport); disable = (suspend != 0) ? 
TRUE : FALSE; VMKAPI_MODULE_CALL(SCSI_GET_MODULE_ID(shost), error, fci->f->vport_disable, vport, disable); return error; } /* * SAS transport functions */ /* * SAS host attributes */ /* * vmklnx_sas_host_setup -- initialize sas_host_attrs * @shost: pointer to a Scsi_Host structure * * RETURN VALUE: * 0 for SUCCESS; negative values if failed * * Include: * scsi/scsi_transport_sas.h */ int vmklnx_sas_host_setup(struct Scsi_Host *shost) { struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); if (sas_host) { if ((sas_host->rphy_list.prev == NULL) || (sas_host->rphy_list.next == NULL)) { /* * sas_host_attrs has not been initialized yet. */ INIT_LIST_HEAD(&sas_host->rphy_list); mutex_init(&sas_host->lock); sas_host->next_target_id = 0; INIT_LIST_HEAD(&sas_host->freed_rphy_list); sas_host->next_expander_id = 0; sas_host->next_port_id = 0; /* * Create Mgmt Adapter Instance to our management */ if (SasLinuxAttachMgmtAdapter(shost)) { return -ENOMEM; } } return 0; } else { return(-1); } } /** * sas_phy_free - free a SAS PHY * @phy: SAS PHY to free * * Frees the specified SAS PHY. * */ /* _VMKLNX_CODECHECK_: sas_phy_free */ void sas_phy_free(struct sas_phy *phy) { VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); } EXPORT_SYMBOL(sas_phy_free); /** * sas_phy_add - add a SAS to the device hierarchy * @phy: a pointer to a sas_phy structure * * Adds a SAS to the device hierarchy. * * RETURN VALUE: * 0 if success; otherwise -errno. */ /* _VMKLNX_CODECHECK_: sas_phy_add */ int sas_phy_add(struct sas_phy *phy) { int error; VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); error = device_add(&phy->dev); return error; } EXPORT_SYMBOL(sas_phy_add); /** * sas_phy_alloc - allocate an intialize a SAS PHY structure * @parent: parent device to connect sas phy structure to * @number: a PHY index * * Allocates and intializes a SAS PHY structure. It will be added as a child in * the device tree to the specified @parent. 
@parent must be a Scsi_Host or * sas_rphy * */ /* _VMKLNX_CODECHECK_: sas_phy_alloc */ struct sas_phy *sas_phy_alloc(struct device *parent, int number) { struct Scsi_Host *shost; struct sas_phy *phy; VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); shost = dev_to_shost(parent); VMK_ASSERT(shost); phy = (struct sas_phy *)kzalloc(sizeof(struct sas_phy), GFP_KERNEL); if (!phy) return NULL; device_initialize(&phy->dev); phy->number = number; phy->dev.parent = get_device(parent); phy->dev.dev_type = SAS_PHY_DEVICE_TYPE; phy->dev.release = sas_phy_release; INIT_LIST_HEAD(&phy->port_siblings); if (scsi_is_sas_expander_device(parent)) { struct sas_rphy *rphy = dev_to_rphy(parent); sprintf(phy->dev.bus_id, "phy-%d:%d:%d", shost->host_no, rphy->scsi_target_id, number); } else sprintf(phy->dev.bus_id, "phy-%d:%d", shost->host_no, number); return phy; } EXPORT_SYMBOL(sas_phy_alloc); /** * sas_rphy_free - free a SAS remote PHY * @rphy: SAS remote PHY to free * * Frees the specified SAS remote PHY. * This function must only be called on a remote PHY that has not sucessfully * been added using sas_rphy_add(). 
* * RETURN VALUE: * None */ /* _VMKLNX_CODECHECK_: sas_rphy_free */ void sas_rphy_free(struct sas_rphy *rphy) { struct device *dev; struct Scsi_Host *shost = NULL; struct sas_host_attrs *sas_host = NULL; VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); VMK_ASSERT(rphy); dev = &rphy->dev; shost = dev_to_shost(rphy->dev.parent->parent); VMK_ASSERT(shost); sas_host = to_sas_host_attrs(shost); VMK_ASSERT(sas_host); mutex_lock(&sas_host->lock); list_del(&rphy->list); mutex_unlock(&sas_host->lock); put_device(dev); } EXPORT_SYMBOL(sas_rphy_free); /* * sas_scsi_target_reparent - reparent the scsi_target to new rphy passed in */ static int sas_scsi_target_reparent(struct device *dev, void *data) { struct scsi_target *stgt = to_scsi_target(dev); struct sas_rphy *new_rphy = (struct sas_rphy *)data; int error; VMKLNX_DEBUG(5, "Entered for dev: %p stgt: %p shost:%p " "new_rphy: %p", dev, stgt, dev_to_shost(dev), new_rphy); if (dev && stgt) { VMKLNX_DEBUG(5, "stgt: %p - refcount on " "entry:%d parent:%p", stgt, atomic_read(&stgt->dev.kref.refcount), dev->parent); /* reparent the scsi_target to the new rphy parent */ get_device(dev); device_del(dev); dev->parent = get_device(&new_rphy->dev); error = device_add(dev); if (error) { VMKLNX_DEBUG(1, "stgt: %p - device_add to new rphy failed", stgt); } scsi_target_unblock(&stgt->dev); put_device(dev); VMKLNX_DEBUG(5, "stgt: %p - refcount on " "exit:%d parent:%p", stgt, atomic_read(&stgt->dev.kref.refcount), dev->parent); } return 0; } /* * sas_assign_scsi_target_id - assign a scsi target id to the SAS device * * if previously removed dev * reparent to new rphy and (re)assign old target id * else * assign next target id * * Returns 1 if a new id was assigned and 0 if none */ static int sas_assign_scsi_target_id(struct sas_rphy *rphy, struct sas_host_attrs *sas_host) { struct sas_rphy *t_rphy; struct sas_port *parent; struct Scsi_Host *shost; VMKLNX_DEBUG(5, "Looking for sas_address 0x%llx rphy:%p", rphy->identify.sas_address, rphy); 
VMK_ASSERT(rphy); parent = dev_to_sas_port(rphy->dev.parent); VMK_ASSERT(parent); shost = dev_to_shost(parent->dev.parent); VMK_ASSERT(shost); list_for_each_entry(t_rphy, &sas_host->freed_rphy_list, list) { VMKLNX_DEBUG(5, "Examining sas_address 0x%llx " "(matching with 0x%llx) t_rphy:%p rphy:%p", t_rphy->identify.sas_address, rphy->identify.sas_address, t_rphy, rphy); if (t_rphy->identify.sas_address == rphy->identify.sas_address) { /* * The children of this rphy are scsi_target structs. * Walk every child and reparent it to new rphy */ device_for_each_child_safe(&t_rphy->dev, rphy, sas_scsi_target_reparent); VMKLNX_DEBUG(5, "Assigning target id %d for sas_address 0x%llx from " "freed list; old rphy->scsi_target_id:0x%d; rphy:0x%p " "t_rphy:0x%p", t_rphy->scsi_target_id, rphy->identify.sas_address, rphy->scsi_target_id, rphy, t_rphy); /* Now assign the old target id to the new rphy */ rphy->scsi_target_id = t_rphy->scsi_target_id; list_del(&t_rphy->list); VMKLNX_DEBUG(5, "t_rphy: %p - refcount before " "put_device: %d", t_rphy, atomic_read(&t_rphy->dev.kref.refcount)); /* There is a new rphy now. The old one can now be released */ put_device(&t_rphy->dev); return 0; } } rphy->scsi_target_id = sas_host->next_target_id++; VMKLNX_DEBUG(5, "Assigning target id %d for sas_address " "0x%llx NOT from freed list", rphy->scsi_target_id, rphy->identify.sas_address); return 1; } /** * Add a SAS remote PHY to the device hierachy * * Publishes a SAS remote PHY to the rest of the system. 
* Assumes sas_host->lock is held as needed * @rphy: The remote PHY to be added * * RETURN VALUE: * 0 on success * negative errno code on error */ /* _VMKLNX_CODECHECK_: sas_rphy_add */ int sas_rphy_add(struct sas_rphy *rphy) { struct sas_port *parent; struct Scsi_Host *shost; struct sas_host_attrs *sas_host; struct sas_identify *identify; int error; VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); VMK_ASSERT(rphy); parent = dev_to_sas_port(rphy->dev.parent); VMK_ASSERT(parent); shost = dev_to_shost(parent->dev.parent); VMK_ASSERT(shost); sas_host = to_sas_host_attrs(shost); identify = &rphy->identify; if (parent->rphy) return -ENXIO; parent->rphy = rphy; error = device_add(&rphy->dev); if (error) { VMKLNX_DEBUG(1, "rphy: %p device_add failed with %d", rphy, error); return error; } mutex_lock(&sas_host->lock); list_add_tail(&rphy->list, &sas_host->rphy_list); if (identify->device_type == SAS_END_DEVICE && (identify->target_port_protocols & (SAS_PROTOCOL_SSP|SAS_PROTOCOL_STP|SAS_PROTOCOL_SATA))) { sas_assign_scsi_target_id(rphy, sas_host); } else if (identify->device_type == SAS_END_DEVICE) rphy->scsi_target_id = -1; mutex_unlock(&sas_host->lock); if (identify->device_type == SAS_END_DEVICE && rphy->scsi_target_id != -1) { scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, ~0, 1); } return 0; } EXPORT_SYMBOL(sas_rphy_add); /** * Removes the specified SAS remote PHY. 
* @rphy: SAS remote PHY to remove * * RETURN VALUE: * None */ /* _VMKLNX_CODECHECK_: sas_rphy_delete */ void sas_rphy_delete(struct sas_rphy *rphy) { struct device *dev = &rphy->dev; struct sas_port *parent = dev_to_sas_port(dev->parent); struct Scsi_Host *shost = dev_to_shost(parent->dev.parent); struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); if (NULL == sas_host) { return; } switch (rphy->identify.device_type) { case SAS_END_DEVICE: break; case SAS_EDGE_EXPANDER_DEVICE: case SAS_FANOUT_EXPANDER_DEVICE: break; default: break; } device_del(dev); mutex_lock(&sas_host->lock); list_del(&rphy->list); list_add_tail(&rphy->list, &sas_host->freed_rphy_list); vmklnx_scsi_target_offline(&rphy->dev); VMKLNX_DEBUG(3, "rphy:%p - Added target id %d for " "sas_address 0x%llx to freed list device_type:%d", rphy, rphy->scsi_target_id, rphy->identify.sas_address, rphy->identify.device_type); mutex_unlock(&sas_host->lock); parent->rphy = NULL; } EXPORT_SYMBOL(sas_rphy_delete); static void sas_rphy_initialize(struct sas_rphy *rphy) { INIT_LIST_HEAD(&rphy->list); } /** * Allocate a SAS rphy and connect to its parent device * @parent: parent to connect SAS rphy port to * */ /* _VMKLNX_CODECHECK_: sas_end_device_alloc */ struct sas_rphy *sas_end_device_alloc(struct sas_port *parent) { struct Scsi_Host *shost; struct sas_end_device *rdev; VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); VMK_ASSERT(parent); shost = dev_to_shost(&parent->dev); VMK_ASSERT(shost); rdev = (struct sas_end_device *) kzalloc(sizeof(struct sas_end_device), GFP_KERNEL); if (!rdev) { return NULL; } device_initialize(&rdev->rphy.dev); rdev->rphy.dev.parent = get_device(&parent->dev); rdev->rphy.dev.dev_type = SAS_END_DEVICE_TYPE; rdev->rphy.dev.release = sas_end_device_release; if (scsi_is_sas_expander_device(parent->dev.parent)) { struct sas_rphy *rphy = dev_to_rphy(parent->dev.parent); sprintf(rdev->rphy.dev.bus_id, "end_device-%d:%d:%d", 
shost->host_no, rphy->scsi_target_id, parent->port_identifier); } else sprintf(rdev->rphy.dev.bus_id, "end_device-%d:%d", shost->host_no, parent->port_identifier); rdev->rphy.identify.device_type = SAS_END_DEVICE; sas_rphy_initialize(&rdev->rphy); return &rdev->rphy; } EXPORT_SYMBOL(sas_end_device_alloc); /** * Allocate an rphy for an end device. * * Allocates a SAS remote PHY structure, connected to @parent and sets its type. * Valid types are: SAS_EDGE_EXPANDER_DEVICE and SAS_FANOUT_EXPANDER_DEVICE * @parent: parent to connect SAS rphy to * @type: device type * */ /* _VMKLNX_CODECHECK_: sas_expander_alloc */ struct sas_rphy *sas_expander_alloc(struct sas_port *parent, enum sas_device_type type) { struct Scsi_Host *shost; struct sas_expander_device *rdev; struct sas_host_attrs *sas_host; VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE); VMK_ASSERT(parent); VMK_ASSERT((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)); shost = dev_to_shost(&parent->dev); VMK_ASSERT(shost); sas_host = to_sas_host_attrs(shost); VMK_ASSERT(sas_host); rdev = (struct sas_expander_device *) kzalloc(sizeof(struct sas_expander_device), GFP_KERNEL); if (!rdev) { return NULL; } device_initialize(&rdev->rphy.dev); rdev->rphy.dev.parent = get_device(&parent->dev); rdev->rphy.dev.dev_type = SAS_EXPANDER_DEVICE_TYPE; rdev->rphy.dev.release = sas_expander_release; mutex_lock(&sas_host->lock); rdev->rphy.scsi_target_id = sas_host->next_expander_id++; mutex_unlock(&sas_host->lock); sprintf(rdev->rphy.dev.bus_id, "expander-%d:%d", shost->host_no, rdev->rphy.scsi_target_id); rdev->rphy.identify.device_type = type; sas_rphy_initialize(&rdev->rphy); return &rdev->rphy; } EXPORT_SYMBOL(sas_expander_alloc); /* * sas_is_sas_port -- check if it is a port device * @dev: a pointer to a struct device * * RETURN VALUE: * 1 if it is a SAS remote PHY; 0 otherwise * * Include: * scsi/scsi_transport_sas.h * * ESX Deviation Notes: * It uses device instead. 
 */
int
scsi_is_sas_port(const struct device *dev)
{
   /* NULL-safe: a missing device is simply "not a SAS port". */
   if (dev) {
      return(dev->dev_type == SAS_PORT_DEVICE_TYPE);
   } else {
      return(0);
   }
}

/*
 * scsi_is_sas_phy -- check if it is a phy device
 * @dev: a pointer to a struct device
 *
 * RETURN VALUE:
 *    1 if it is a SAS PHY; 0 otherwise
 *
 * Include:
 *    scsi/scsi_transport_sas.h
 *
 * ESX Deviation Notes:
 *    It uses device instead.
 */
int
scsi_is_sas_phy(const struct device *dev)
{
   /* NULL-safe: a missing device is simply "not a SAS PHY". */
   if (dev) {
      return(dev->dev_type == SAS_PHY_DEVICE_TYPE);
   } else {
      return(0);
   }
}

/**
 * scsi_is_sas_rphy - check if it is a rphy device
 * @dev: a pointer to struct device
 *
 * This function is used to identify if @dev is a SAS remote PHY.
 *
 * RETURN VALUE:
 * Returns TRUE if @dev is a SAS remote PHY, FALSE otherwise.
 *
 */
/* _VMKLNX_CODECHECK_: scsi_is_sas_rphy */
int
scsi_is_sas_rphy(const struct device *dev)
{
   VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
   /* Both end devices and expanders count as remote PHYs. */
   if (dev) {
      return(dev->dev_type == SAS_END_DEVICE_TYPE ||
             dev->dev_type == SAS_EXPANDER_DEVICE_TYPE);
   } else {
      return(0);
   }
}
EXPORT_SYMBOL(scsi_is_sas_rphy);

/**
 * instantiate SAS transport template
 * @ft: SAS transport class function template
 *
 * RETURN VALUE:
 * Pointer to SAS transport template
 */
/* _VMKLNX_CODECHECK_: sas_attach_transport */
struct scsi_transport_template *
sas_attach_transport(struct sas_function_template *ft)
{
   struct vmklnx_ScsiModule *vmklnx26ScsiModule;
   struct sas_internal *i;

   VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
   VMK_ASSERT(ft);

   i = (struct sas_internal *)
       kzalloc(sizeof(struct sas_internal), GFP_KERNEL);
   VMK_ASSERT(i);
   if (unlikely(!i)) {
      /* NOTE(review): message says "spi_internal" but this allocates a
       * sas_internal — message text appears copied from the SPI transport. */
      VMKLNX_WARN("Unable to allocate memory for spi_internal");
      return NULL;
   }

   /* Register a vmklinux SCSI module wrapper for the SAS transport class. */
   vmklnx26ScsiModule = vmklnx_alloc_scsimod(VMKLNX_SCSI_TRANSPORT_TYPE_SAS, i);
   if (!vmklnx26ScsiModule) {
      kfree(i);
      return NULL;
   }

   i->t.module = (void *)vmklnx26ScsiModule;
   /* Reserve per-host space for sas_host_attrs (rphy lists, id counters). */
   i->t.host_size = sizeof(struct sas_host_attrs);
   i->f = ft;

   return &i->t;
}
EXPORT_SYMBOL(sas_attach_transport);

/**
 * sas_release_transport - Release SAS transport template
instance
 * @t: transport template instance
 *
 * RETURN VALUE:
 * None.
 */
/* _VMKLNX_CODECHECK_: sas_release_transport */
void sas_release_transport(struct scsi_transport_template *t)
{
	struct sas_internal *i = to_sas_internal(t);

	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);

	/*
	 * Free up the module structure
	 */
	kfree(t->module);

	/*
	 * Free up the transport_internal structure
	 */
	kfree(i);
}
EXPORT_SYMBOL(sas_release_transport);

/* Detach a SAS phy from the device tree and drop its last reference. */
void sas_phy_delete(struct sas_phy *phy)
{
	struct device *dev = &phy->dev;

	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);

	/* NOTE(review): this NULL guard runs only after &phy->dev was already
	 * computed above; a NULL phy should be rejected before that. */
	if(NULL == phy) {
		return;
	}
	device_del(dev);
	put_device(dev);
	return;
}
EXPORT_SYMBOL(sas_phy_delete);

/* device_for_each_child() callback: pass 0 deletes child SAS ports,
 * pass 1 deletes child SAS phys (data carries the pass number). */
static int do_sas_phy_delete(struct device *dev, void *data)
{
	int pass = (int)(unsigned long)data;

	if (pass == 0 && scsi_is_sas_port(dev))
		sas_port_delete(dev_to_sas_port(dev));
	else if (pass == 1 && scsi_is_sas_phy(dev))
		sas_phy_delete(dev_to_phy(dev));
	return 0;
}

/**
 * Issues SCSI Mode Sense command for page 0x19 to the passed in scsi_device.
 * Figures out the associated SAS end device and populates appropriate fields
 * on success.
 * @sdev: Pointer to SCSI device of type struct scsi_device
 *
 * RETURN VALUE:
 * 0 on Sucess, non-zero otherwise
 *
 */
/* _VMKLNX_CODECHECK_: sas_read_port_mode_page */
int sas_read_port_mode_page(struct scsi_device *sdev)
{
	/* only need 8 bytes of data plus header (4 or 8) */
#define BUF_SIZE 64
	unsigned char *buffer;
	unsigned char *msdata;
	struct sas_rphy *rphy;
	struct sas_end_device *rdev;
	struct scsi_mode_data mode_data;
	int res, error;

	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
	VMK_ASSERT(sdev);
	VMK_ASSERT(sdev->sdev_target);
	rphy = target_to_rphy(sdev->sdev_target);
	VMK_ASSERT(rphy);
	VMK_ASSERT(rphy->identify.device_type == SAS_END_DEVICE);
	rdev = rphy_to_end_device(rphy);

	buffer = (unsigned char *) vmklnx_kzmalloc(vmklnxLowHeap,
						   BUF_SIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Mode Sense, page 0x19 (Protocol Specific Port), 30s timeout, 3 retries */
	res = scsi_mode_sense(sdev, 1, 0x19, buffer, BUF_SIZE, 30*HZ, 3,
			      &mode_data, NULL);
	error = -EINVAL;
	if (!scsi_status_is_good(res))
		goto out;

	msdata = buffer + mode_data.header_length +
		mode_data.block_descriptor_length;

	/* bail out if the 8-byte payload would run past the buffer */
	if (msdata - buffer > BUF_SIZE - 8)
		goto out;

	error = 0;

	rdev->ready_led_meaning = msdata[2] & 0x10 ? 1 : 0;
	rdev->I_T_nexus_loss_timeout = (msdata[4] << 8) + msdata[5];
	rdev->initiator_response_timeout = (msdata[6] << 8) + msdata[7];

 out:
	vmklnx_kfree(vmklnxLowHeap, buffer);
	return error;
}
EXPORT_SYMBOL(sas_read_port_mode_page);

/*
 * tear down a device SAS data structure
 * @dev: a pointer to device belonging to the SAS object
 *
 * RETURN VALUE:
 * void
 *
 * Include:
 * scsi/scsi_transport_sas.h
 *
 * comments
 * It must be called just before scsi_remove_host for SAS HBAs.
 *
 */
void sas_remove_children(struct device *dev)
{
	VMK_ASSERT(dev);
	/* two passes over the children: delete ports first, then phys */
	device_for_each_child(dev, (void *)0, do_sas_phy_delete);
	device_for_each_child(dev, (void *)1, do_sas_phy_delete);
}

/* device_for_each_child() callback: remove the scsi target rooted at @dev. */
static int sas_remove_scsi_target(struct device *dev, void *data)
{
	struct scsi_target *stgt = to_scsi_target(dev);

	VMKLNX_DEBUG(0, "Entered for dev: %p stgt: %p shost:%p "
			"new_rphy: %p", dev, stgt, dev_to_shost(dev), data);

	if (dev && stgt) {
		scsi_remove_target(&stgt->dev);
		VMKLNX_DEBUG(3, "Found and deleted target "
				"(i.e stgt:%p &stgt->dev:%p", stgt, &stgt->dev);
	}
	return 0;
}

/**
 * sas_remove_host - terminate any sas_transport related elements
 * @shost: Pointer to struct Scsi_Host
 *
 * Terminates any sas_transport related elements for a scsi host.
 * This routine is expected to be called immediately preceeding the call from
 * the driver to scsi_remove_host().
 *
 * RETURN VALUE:
 * None
 */
/* _VMKLNX_CODECHECK_: sas_remove_host */
void sas_remove_host(struct Scsi_Host *shost)
{
	struct sas_host_attrs *sas_host;
	struct sas_rphy *t_rphy, *t_rphy1;

	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
	VMKLNX_DEBUG(0, "shost:%p", shost);
	sas_host = to_sas_host_attrs(shost);

	/* first reap rphys parked on the host's freed list, removing any
	 * scsi targets still attached below them */
	list_for_each_entry_safe(t_rphy, t_rphy1,
			&sas_host->freed_rphy_list, list) {
		VMKLNX_DEBUG(0, "shost:%p, processing t_rphy:%p",
				shost, t_rphy);
		device_for_each_child(&t_rphy->dev, NULL,
				sas_remove_scsi_target);
		sas_rphy_free(t_rphy);
	}
	sas_remove_children(&shost->shost_gendev);
}
EXPORT_SYMBOL(sas_remove_host);

/*
 * sas_port_alloc -- allocate and initialize a SAS port structure
 *
 * @parent: a pointer to the parent device
 * @port_id: a port number
 *
 * RETURN VALUE:
 * a pointer to the allocated PHY structure; NULL if failed
 *
 * Include:
 * scsi/scsi_transport_sas.h
 *
 */
struct sas_port *sas_port_alloc(struct device *parent, int port_id)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	struct sas_port *port;

	port = (struct sas_port *) kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return NULL;

	port->port_identifier = port_id;

	device_initialize(&port->dev);

	/* hold the parent for the lifetime of the port; released by
	 * sas_port_release() */
	port->dev.parent = get_device(parent);
	port->dev.dev_type = SAS_PORT_DEVICE_TYPE;
	port->dev.release = sas_port_release;
	mutex_init(&port->phy_list_mutex);
	INIT_LIST_HEAD(&port->phy_list);

	/* expander-attached ports embed the expander's target id in the name */
	if (scsi_is_sas_expander_device(parent)) {
		struct sas_rphy *rphy = dev_to_rphy(parent);
		sprintf(port->dev.bus_id, "port-%d:%d:%d", shost->host_no,
			rphy->scsi_target_id, port->port_identifier);
	} else {
		sprintf(port->dev.bus_id, "port-%d:%d", shost->host_no,
			port->port_identifier);
	}

	return port;
}

/**
 * Allocates a SAS port structure and a number to go with it. This interface is
 * really for adapters where the port number has no meaning, so the sas class
 * should manage them. It will be added to the device tree below the device
 * specified by @parent which must be either a Scsi_Host or a
 * sas_expander_device.
 * @parent: a pointer to the parent device
 *
 * RETURN VALUE:
 * Returns a pointer to the allocated PHY structure; Returns NULL on error.
 *
 */
/* _VMKLNX_CODECHECK_: sas_port_alloc_num */
struct sas_port *sas_port_alloc_num(struct device *parent)
{
	int index;
	struct Scsi_Host *shost;
	struct sas_host_attrs *sas_host;

	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
	VMK_ASSERT(parent);
	shost = dev_to_shost(parent);
	VMK_ASSERT(shost);
	sas_host = to_sas_host_attrs(shost);
	VMK_ASSERT(sas_host);

	/* FIXME: use idr for this eventually */
	mutex_lock(&sas_host->lock);
	if (scsi_is_sas_expander_device(parent)) {
		struct sas_rphy *rphy = dev_to_rphy(parent);
		struct sas_expander_device *exp =
			rphy_to_expander_device(rphy);

		index = exp->next_port_id++;
	} else
		index = sas_host->next_port_id++;
	mutex_unlock(&sas_host->lock);

	return sas_port_alloc(parent, index);
}
EXPORT_SYMBOL(sas_port_alloc_num);

/**
 * Marks the port as a backlink.
 * @port: a pointer to struct sas_port
 *
 * RETURN VALUE:
 * This function does not return a value.
 *
 */
/* _VMKLNX_CODECHECK_: sas_port_mark_backlink */
void sas_port_mark_backlink(struct sas_port *port)
{
	struct device *parent;

	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
	VMK_ASSERT(port);
	/* NOTE(review): parent is computed but never used in this function */
	parent = port->dev.parent->parent->parent;

	if (port->is_backlink)
		return;
	port->is_backlink = 1;
}
EXPORT_SYMBOL(sas_port_mark_backlink);

/**
 * Add the passed in SAS port to the device hierarchy
 * @port: Pointer to SAS port of type struct sas_port
 *
 * RETURN VALUE:
 * 0 on success and non-zero on error
 *
 */
/* _VMKLNX_CODECHECK_: sas_port_add */
int sas_port_add(struct sas_port *port)
{
	int error;

	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
	/* No phys should be added until this is made visible */
	BUG_ON(!list_empty(&port->phy_list));

	error = device_add(&port->dev);

	return error;
}
EXPORT_SYMBOL(sas_port_add);

/**
 * Add the passed in PHY to the passed in SAS port.
 * This API is usually called by SAS drivers that discover the SAS ports and
 * want to associate the PHY with the SAS port.
 * @port: Pointer to SAS port of type struct sas_port
 * @phy: Pointer to a PHY of type struct sas_phy
 *
 * ESX Deviation Notes:
 * No sysfs links are created on ESXi
 */
/* _VMKLNX_CODECHECK_: sas_port_add_phy */
void sas_port_add_phy(struct sas_port *port, struct sas_phy *phy)
{
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
	VMK_ASSERT(port);
	VMK_ASSERT(phy);

	mutex_lock(&port->phy_list_mutex);
	if (unlikely(!list_empty(&phy->port_siblings))) {
		/* make sure we're already on this port */
		struct sas_phy *tmp;
		list_for_each_entry(tmp, &port->phy_list, port_siblings)
			if (tmp == phy)
				break;
		/* If this trips, you added a phy that was already
		 * part of a different port */
		if (unlikely(tmp != phy)) {
			dev_printk(KERN_ERR, &port->dev, "trying to add"
				" phy %s fails: it's already part of another port\n",
				phy->dev.bus_id);
			VMK_ASSERT(FALSE);
		}
	} else {
		list_add_tail(&phy->port_siblings, &port->phy_list);
		port->num_phys++;
	}
	mutex_unlock(&port->phy_list_mutex);
}
EXPORT_SYMBOL(sas_port_add_phy);

/**
 * Removes the passed in PHY from the passed in SAS port.
 * This operation is usually done as part of tearing down ports.
 * @port: Pointer to a SAS port
 * @phy: Pointer to the phy to be removed from the SAS port
 *
 */
/* _VMKLNX_CODECHECK_: sas_port_delete_phy */
void sas_port_delete_phy(struct sas_port *port, struct sas_phy *phy)
{
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
	VMK_ASSERT(port);
	VMK_ASSERT(phy);

	mutex_lock(&port->phy_list_mutex);
	list_del_init(&phy->port_siblings);
	port->num_phys--;
	mutex_unlock(&port->phy_list_mutex);
}
EXPORT_SYMBOL(sas_port_delete_phy);

/**
 * Removes the specified SAS port.
 * If any PHYs are found hanging from the passed in SAS port,
 * those are cleaned up as well.
 * @port: Pointer to a SAS port (of type struct sas_port) to be removed
 *
 */
/* _VMKLNX_CODECHECK_: sas_port_delete */
void sas_port_delete(struct sas_port *port)
{
	struct device *dev;
	struct sas_phy *phy, *tmp_phy;

	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
	VMK_ASSERT(port);
	dev = &port->dev;

	/* delete the child rphy first, if any */
	if (port->rphy) {
		sas_rphy_delete(port->rphy);
		port->rphy = NULL;
	}

	/* detach all phys from this port (the phys themselves survive) */
	mutex_lock(&port->phy_list_mutex);
	list_for_each_entry_safe(phy, tmp_phy, &port->phy_list,
			port_siblings) {
		list_del_init(&phy->port_siblings);
	}
	mutex_unlock(&port->phy_list_mutex);

	if (port->is_backlink) {
		port->is_backlink = 0;
	}

	device_del(dev);
	put_device(dev);
}
EXPORT_SYMBOL(sas_port_delete);

/*
 * sas_find_rphy -- Find a matching rphy
 *
 * RETURN VALUE:
 * None
 *
 * Include:
 * scsi/scsi_host.h
 */
struct sas_rphy *
sas_find_rphy(struct Scsi_Host *sh, uint id)
{
	struct sas_host_attrs *sas_host;
	struct sas_rphy *rphy, *found_rphy = NULL;

	VMK_ASSERT(sh);
	sas_host = to_sas_host_attrs(sh);
	VMK_ASSERT(sas_host);

	/*
	 * Search for an existing target for this sdev.
	 */
	mutex_lock(&sas_host->lock);
	list_for_each_entry(rphy, &sas_host->rphy_list, list) {
		if (rphy->scsi_target_id == id) {
			found_rphy = rphy;
			break;
		}
	}
	mutex_unlock(&sas_host->lock);
	return found_rphy;
}

/* device ->release callbacks: drop the parent reference, free the object */

static void fc_rport_dev_release(struct device *dev)
{
	struct fc_rport *rport = dev_to_rport(dev);

	put_device(dev->parent);
	kfree(rport);
}

static void fc_vport_dev_release(struct device *dev)
{
	struct fc_vport *vport = dev_to_vport(dev);

	put_device(dev->parent);	/* release kobj parent */
	kfree(vport);
}

static void sas_phy_release(struct device *dev)
{
	struct sas_phy *phy = dev_to_phy(dev);

	put_device(dev->parent);
	kfree(phy);
}

static void sas_port_release(struct device *dev)
{
	struct sas_port *port = dev_to_sas_port(dev);

	put_device(dev->parent);
	kfree(port);
}

static void sas_end_device_release(struct device *dev)
{
	struct sas_rphy *rphy = dev_to_rphy(dev);
	struct sas_end_device *edev = rphy_to_end_device(rphy);

	put_device(dev->parent);
	kfree(edev);
}

static void sas_expander_release(struct device *dev)
{
	struct sas_rphy *rphy = dev_to_rphy(dev);
	struct sas_expander_device *edev = rphy_to_expander_device(rphy);

	put_device(dev->parent);
	kfree(edev);
}

/**
 * Copies the given values to the Vmkernel storage management structure.
 * The given MAC address pointers are permitted to be NULL, for which
 * the corresponding management information will not be set.
 * @shost: Pointer to shost structure
 * @netdevName: Pointer to CNA net_device name
 * @vid: VLAN ID
 * @fcoeControllerMac: FCOE Controller MAC address
 * @vnportMac: VNPort MAC address
 * @fcfMac: FCF MAC address
 *
 * ESX Deviation Notes:
 * This API is not present in Linux. This should be called immediately
 * after FLOGI is completed.
 */
/* _VMKLNX_CODECHECK_: vmlnx_init_fcoe_attribs */
int vmklnx_init_fcoe_attribs(struct Scsi_Host *shost,
			     char *netdevName,
			     unsigned short vid,
			     unsigned char *fcoeControllerMac,
			     unsigned char *vnportMac,
			     unsigned char *fcfMac)
{
	struct fc_host_attrs *fc_host;
	struct vmk_FcoeAdapterAttrs *attrs;

	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
	VMK_ASSERT(shost);
	VMK_ASSERT(netdevName);
	fc_host = shost_to_fc_host(shost);
	VMK_ASSERT(fc_host);

	attrs = (struct vmk_FcoeAdapterAttrs *) fc_host->cna_ops;

	/* always NUL-terminate the copied vmnic name */
	strncpy(attrs->vmnicName, netdevName, VMK_DEVICE_NAME_MAX_LENGTH);
	attrs->vmnicName[VMK_DEVICE_NAME_MAX_LENGTH - 1] = '\0';
	attrs->vlanId = vid;

	/* copy a MAC only when the caller supplied one */
#define COPY_MAC(dst, src)					\
	if (src) {						\
		memcpy(dst, src, VMK_MAX_ETHERNET_MAC_LENGTH);	\
	}

	COPY_MAC(attrs->fcoeContlrMacAddr, fcoeControllerMac);
	COPY_MAC(attrs->vnPortMacAddr, vnportMac);
	COPY_MAC(attrs->fcfMacAddr, fcfMac);
#undef COPY_MAC

	return 0;
}
EXPORT_SYMBOL(vmklnx_init_fcoe_attribs);

/*
 * SCSILinux_InitTransport
 *
 * Entry point for SCSI Transport-specific initialization.
 * Called as part of SCSILinux_Init in linux_scsi.c.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Initializes SCSI Transport log.
 */
void SCSILinux_InitTransport(void)
{
	VMKLNX_CREATE_LOG();
}

/*
 * SCSILinux_CleanupTransport
 *
 * Entry point for SCSI Transport-specific teardown.
 * Called as part of SCSILinux_Cleanup in linux_scsi.c.
 *
 * Results:
 *	None.
 *
 * Side effects:
 *	Cleans up SCSI Transport log.
 */
void SCSILinux_CleanupTransport(void)
{
	VMKLNX_DESTROY_LOG();
}
pombredanne/https-git.sfconservancy.org-vmkdrivers
vmkdrivers/src_92/vmklinux_92/vmware/linux_scsi_transport.c
C
gpl-2.0
121,432
package cn.me.fdfs.service.impl;

import cn.me.fdfs.service.BaseService;
import cn.me.fdfs.service.WarningService;
import cn.me.fdfs.vo.PageInfo;
import cn.me.fdfs.vo.WarningData;
import cn.me.fdfs.vo.WarningUser;
import com.mysql.jdbc.StringUtils;
import org.csource.common.MyException;
import org.hibernate.Query;
import org.hibernate.Session;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * Hibernate-backed implementation of {@link WarningService}: CRUD and paged
 * lookups for warning records ({@link WarningData}) and warning recipients
 * ({@link WarningUser}).
 *
 * <p>All filter values are bound as named HQL parameters rather than being
 * concatenated into the query string, which closes the HQL-injection hole of
 * the previous implementation. Page totals are computed with a
 * {@code select count(*)} query instead of materialising every row.
 */
@Service
public class WarningServiceImpl extends BaseService implements WarningService {

    private static final Logger logger = LoggerFactory
            .getLogger(WarningServiceImpl.class);

    /**
     * Inserts a new warning record or updates an existing one.
     *
     * @param wd the warning record to persist
     */
    @Override
    @Transactional(propagation = Propagation.REQUIRED)
    public void updateWarning(WarningData wd) throws IOException, MyException {
        getSession().saveOrUpdate(wd);
    }

    /**
     * Returns all warning records.
     *
     * @return every persisted {@link WarningData}, possibly empty
     */
    @Override
    @Transactional(propagation = Propagation.REQUIRED)
    public List<WarningData> findWarning() throws IOException, MyException {
        return getSession().createQuery("from WarningData as w").list();
    }

    /**
     * Returns one page of warning records, optionally filtered by a substring
     * match on the IP address, and stores the total match count on
     * {@code pageInfo}.
     *
     * @param wd       filter holder; when {@code wdIpAddr} is non-empty it is
     *                 matched with {@code like '%...%'}
     * @param pageInfo paging parameters (page number / page size); its total
     *                 count is updated as a side effect
     * @return the requested page of matching records
     */
    @Override
    @Transactional(propagation = Propagation.REQUIRED)
    public List<WarningData> findWarning(WarningData wd, PageInfo pageInfo)
            throws IOException, MyException {
        Session session = getSession();
        boolean filtered = !StringUtils.isNullOrEmpty(wd.getWdIpAddr());
        // Bind the pattern as a named parameter instead of concatenating
        // user input into the HQL string (fixes HQL injection).
        String where = filtered ? " where wd.wdIpAddr like :ip" : "";
        Query countQuery = session.createQuery(
                "select count(*) from WarningData as wd" + where);
        Query query = session.createQuery("from WarningData as wd" + where);
        if (filtered) {
            String pattern = "%" + wd.getWdIpAddr() + "%";
            countQuery.setString("ip", pattern);
            query.setString("ip", pattern);
        }
        // count(*) avoids loading every matching row just to learn the total
        pageInfo.setTotalCount(((Number) countQuery.uniqueResult()).intValue());
        query.setMaxResults(pageInfo.getNumPerPage());
        query.setFirstResult((pageInfo.getPageNum() - 1) * pageInfo.getNumPerPage());
        return query.list();
    }

    /**
     * Loads a warning record by primary key.
     *
     * @param id primary key of the record
     * @return the record, or {@code null} when no such row exists
     */
    @Override
    @Transactional(propagation = Propagation.REQUIRED)
    public WarningData findById(String id) throws IOException, MyException {
        return (WarningData) getSession().get(WarningData.class, id);
    }

    /**
     * Deletes the warning record with the given id.
     *
     * @param id primary key of the record to remove
     */
    @Override
    @Transactional(propagation = Propagation.REQUIRED)
    public void delWarning(String id) throws IOException, MyException {
        // delete by transient instance carrying only the identifier
        WarningData wd = new WarningData();
        wd.setId(id);
        getSession().delete(wd);
    }

    /**
     * Returns all warning records whose IP address equals {@code ip} exactly.
     *
     * @param ip IP address to match
     * @return the matching records, possibly empty
     */
    @Override
    @Transactional(propagation = Propagation.REQUIRED)
    public List<WarningData> findByIp(String ip) throws IOException, MyException {
        Query query = getSession()
                .createQuery("from WarningData wd where wd.wdIpAddr=:ip");
        return query.setString("ip", ip).list();
    }

    /**
     * Returns all warning users.
     *
     * @return every persisted {@link WarningUser}, possibly empty
     */
    @Override
    @Transactional(propagation = Propagation.REQUIRED)
    public List<WarningUser> findWarUser() throws IOException, MyException {
        return getSession().createQuery("from WarningUser as w").list();
    }

    /**
     * Returns one page of warning users, optionally filtered by a substring
     * match on the user name, and stores the total match count on
     * {@code pageInfo}.
     *
     * @param wu       filter holder; when {@code name} is non-empty it is
     *                 matched with {@code like '%...%'}
     * @param pageInfo paging parameters; its total count is updated
     * @return the requested page of matching users
     */
    @Override
    @Transactional(propagation = Propagation.REQUIRED)
    public List<WarningUser> findWarUser(WarningUser wu, PageInfo pageInfo)
            throws IOException, MyException {
        Session session = getSession();
        boolean filtered = !StringUtils.isNullOrEmpty(wu.getName());
        // named-parameter binding instead of string concatenation
        String where = filtered ? " where w.name like :name" : "";
        Query countQuery = session.createQuery(
                "select count(*) from WarningUser as w" + where);
        Query query = session.createQuery("from WarningUser as w" + where);
        if (filtered) {
            String pattern = "%" + wu.getName() + "%";
            countQuery.setString("name", pattern);
            query.setString("name", pattern);
        }
        pageInfo.setTotalCount(((Number) countQuery.uniqueResult()).intValue());
        query.setMaxResults(pageInfo.getNumPerPage());
        query.setFirstResult((pageInfo.getPageNum() - 1) * pageInfo.getNumPerPage());
        return query.list();
    }

    /**
     * Loads a warning user by primary key.
     *
     * @param id primary key of the user
     * @return the user, or {@code null} when no such row exists
     */
    @Override
    @Transactional(propagation = Propagation.REQUIRED)
    public WarningUser findUserId(String id) throws IOException, MyException {
        return (WarningUser) getSession().get(WarningUser.class, id);
    }

    /**
     * Deletes the warning user with the given id.
     *
     * @param id primary key of the user to remove
     */
    @Override
    @Transactional(propagation = Propagation.REQUIRED)
    public void delWarUser(String id) throws IOException, MyException {
        WarningUser wu = new WarningUser();
        wu.setId(id);
        getSession().delete(wu);
    }

    /**
     * Inserts a new warning user or updates an existing one.
     *
     * @param wu the warning user to persist
     */
    @Override
    @Transactional(propagation = Propagation.REQUIRED)
    public void updateWarUser(WarningUser wu) throws IOException, MyException {
        getSession().saveOrUpdate(wu);
    }
}
fengshao0907/fastdfs-zyc-1
fastdfs-zyc/main/java/cn/me/fdfs/service/impl/WarningServiceImpl.java
Java
gpl-2.0
6,715
/*
 * twl4030_gpio.c -- access to GPIOs on TWL4030/TPS659x0 chips
 *
 * Copyright (C) 2006-2007 Texas Instruments, Inc.
 * Copyright (C) 2006 MontaVista Software, Inc.
 *
 * Code re-arranged and cleaned up by:
 *	Syed Mohammed Khasim <x0khasim@ti.com>
 *
 * Initial Code:
 *	Andy Lowe / Nishanth Menon
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/i2c/twl4030.h>

#ifdef CONFIG_MACH_OMAP3621_EVT1A
#include <mach/board-boxer.h>
#endif /* CONFIG_MACH_OMAP3621_EVT1A */

/*
 * The GPIO "subchip" supports 18 GPIOs which can be configured as
 * inputs or outputs, with pullups or pulldowns on each pin.  Each
 * GPIO can trigger interrupts on either or both edges.
 *
 * GPIO interrupts can be fed to either of two IRQ lines; this is
 * intended to support multiple hosts.
 *
 * There are also two LED pins used sometimes as output-only GPIOs.
 */

#define DCDC_GLOBAL_CFG		(0x61-0x5b)

/* defined with its initializer further below; forward-declared here */
static struct gpio_chip twl_gpiochip;
static int twl4030_gpio_irq_base;

/* genirq interfaces are not available to modules */
#ifdef MODULE
#define is_module()	true
#else
#define is_module()	false
#endif

/* GPIO_CTRL Fields */
#define MASK_GPIO_CTRL_GPIO0CD1		BIT(0)
#define MASK_GPIO_CTRL_GPIO1CD2		BIT(1)
#define MASK_GPIO_CTRL_GPIO_ON		BIT(2)

/* Mask for GPIO registers when aggregated into a 32-bit integer */
#define GPIO_32_MASK			0x0003ffff

/* Data structures */
static DEFINE_MUTEX(gpio_lock);

/* store usage of each GPIO. - each bit represents one GPIO */
static unsigned int gpio_usage_count;

/*----------------------------------------------------------------------*/

/*
 * To configure TWL4030 GPIO module registers
 */
static inline int gpio_twl4030_write(u8 address, u8 data)
{
	return twl4030_i2c_write_u8(TWL4030_MODULE_GPIO, data, address);
}

/*----------------------------------------------------------------------*/

/*
 * LED register offsets (use TWL4030_MODULE_{LED,PWMA,PWMB}))
 * PWMs A and B are dedicated to LEDs A and B, respectively.
 */

#define TWL4030_LED_LEDEN	0x0

/* LEDEN bits */
#define LEDEN_LEDAON		BIT(0)
#define LEDEN_LEDBON		BIT(1)
#define LEDEN_LEDAEXT		BIT(2)
#define LEDEN_LEDBEXT		BIT(3)
#define LEDEN_LEDAPWM		BIT(4)
#define LEDEN_LEDBPWM		BIT(5)
#define LEDEN_PWM_LENGTHA	BIT(6)
#define LEDEN_PWM_LENGTHB	BIT(7)

#define TWL4030_PWMx_PWMxON	0x0
#define TWL4030_PWMx_PWMxOFF	0x1

#define PWMxON_LENGTH		BIT(7)

/*----------------------------------------------------------------------*/

/*
 * To read a TWL4030 GPIO module register
 */
static inline int gpio_twl4030_read(u8 address)
{
	u8 data;
	int ret = 0;

	ret = twl4030_i2c_read_u8(TWL4030_MODULE_GPIO, &data, address);
	return (ret < 0) ? ret : data;
}

/*----------------------------------------------------------------------*/

static u8 cached_leden;		/* protected by gpio_lock */

/* The LED lines are open drain outputs ... a FET pulls to GND, so an
 * external pullup is needed.  We could also expose the integrated PWM
 * as a LED brightness control; we initialize it as "always on".
 */
static void twl4030_led_set_value(int led, int value)
{
	u8 mask = LEDEN_LEDAON | LEDEN_LEDAPWM;
	int status;

	if (led)
		mask <<= 1;

	mutex_lock(&gpio_lock);
	/* open drain: nonzero value means "not driven", so clear the bits */
	if (value)
		cached_leden &= ~mask;
	else
		cached_leden |= mask;
	status = twl4030_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
			TWL4030_LED_LEDEN);
	mutex_unlock(&gpio_lock);
}

/* Set one GPIO's direction; is_input != 0 selects input. Read-modify-write
 * of the bank's direction register under gpio_lock. */
static int twl4030_set_gpio_direction(int gpio, int is_input)
{
	u8 d_bnk = gpio >> 3;
	u8 d_msk = BIT(gpio & 0x7);
	u8 reg = 0;
	u8 base = REG_GPIODATADIR1 + d_bnk;
	int ret = 0;

	mutex_lock(&gpio_lock);
	ret = gpio_twl4030_read(base);
	if (ret >= 0) {
		if (is_input)
			reg = ret & ~d_msk;
		else
			reg = ret | d_msk;

		ret = gpio_twl4030_write(base, reg);
	}
	mutex_unlock(&gpio_lock);
	return ret;
}

/* Drive a GPIO output; uses the chip's dedicated set/clear registers,
 * so no read-modify-write (and no lock) is needed. */
static int twl4030_set_gpio_dataout(int gpio, int enable)
{
	u8 d_bnk = gpio >> 3;
	u8 d_msk = BIT(gpio & 0x7);
	u8 base = 0;

	if (enable)
		base = REG_SETGPIODATAOUT1 + d_bnk;
	else
		base = REG_CLEARGPIODATAOUT1 + d_bnk;

	return gpio_twl4030_write(base, d_msk);
}

/* Read one GPIO input bit; -EPERM unless the line was request()ed. */
static int twl4030_get_gpio_datain(int gpio)
{
	u8 d_bnk = gpio >> 3;
	u8 d_off = gpio & 0x7;
	u8 base = 0;
	int ret = 0;

	if (unlikely((gpio >= TWL4030_GPIO_MAX)
			|| !(gpio_usage_count & BIT(gpio))))
		return -EPERM;

	base = REG_GPIODATAIN1 + d_bnk;
	ret = gpio_twl4030_read(base);
	if (ret > 0)
		ret = (ret >> d_off) & 0x1;

	return ret;
}

/*----------------------------------------------------------------------*/

/* gpio_chip ->request hook: set up LED "gpios", and power the GPIO
 * module on first real-GPIO use. */
static int twl_request(struct gpio_chip *chip, unsigned offset)
{
	int status = 0;
	int ret;
	u8 RdReg;

	mutex_lock(&gpio_lock);

	/* Support the two LED outputs as output-only GPIOs. */
	if (offset >= TWL4030_GPIO_MAX) {
		u8 ledclr_mask = LEDEN_LEDAON | LEDEN_LEDAEXT
				| LEDEN_LEDAPWM | LEDEN_PWM_LENGTHA;
		u8 module = TWL4030_MODULE_PWMA;

		offset -= TWL4030_GPIO_MAX;
		if (offset) {
			ledclr_mask <<= 1;
			module = TWL4030_MODULE_PWMB;
		}

		/* initialize PWM to always-drive */
		status = twl4030_i2c_write_u8(module, 0x7f,
				TWL4030_PWMx_PWMxOFF);
		if (status < 0)
			goto done;
		status = twl4030_i2c_write_u8(module, 0x7f,
				TWL4030_PWMx_PWMxON);
		if (status < 0)
			goto done;

		/* init LED to not-driven (high) */
		module = TWL4030_MODULE_LED;
		status = twl4030_i2c_read_u8(module, &cached_leden,
				TWL4030_LED_LEDEN);
		if (status < 0)
			goto done;
		cached_leden &= ~ledclr_mask;
		status = twl4030_i2c_write_u8(module, cached_leden,
				TWL4030_LED_LEDEN);
		if (status < 0)
			goto done;

		status = 0;
		goto done;
	}

	/* on first use, turn GPIO module "on" */
	if (!gpio_usage_count) {
		struct twl4030_gpio_platform_data *pdata;
		u8 value = MASK_GPIO_CTRL_GPIO_ON;

		/* optionally have the first two GPIOs switch vMMC1
		 * and vMMC2 power supplies based on card presence.
		 */
#ifdef CONFIG_MACH_OMAP3621_EVT1A
		/* Change Triton card power based on polarity on CD1 */
		if(is_encore_board_evt2()) {
			/* NOTE(review): the i2c status collected in 'ret'
			 * here is never checked */
			ret = twl4030_i2c_read_u8(TWL4030_MODULE_PM_RECEIVER,
					&RdReg, DCDC_GLOBAL_CFG);
			RdReg |= 0x40;
			ret |= twl4030_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER,
					RdReg, DCDC_GLOBAL_CFG);
		}
#endif /* CONFIG_MACH_OMAP3621_EVT1A */
		pdata = chip->dev->platform_data;
		value |= pdata->mmc_cd & 0x03;
		status = gpio_twl4030_write(REG_GPIO_CTRL, value);
	}

	if (!status)
		gpio_usage_count |= (0x1 << offset);

done:
	mutex_unlock(&gpio_lock);
	return status;
}

/* gpio_chip ->free hook: park LED lines high, power module off on last use. */
static void twl_free(struct gpio_chip *chip, unsigned offset)
{
	if (offset >= TWL4030_GPIO_MAX) {
		twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1);
		return;
	}

	mutex_lock(&gpio_lock);

	gpio_usage_count &= ~BIT(offset);

	/* on last use, switch off GPIO module */
	if (!gpio_usage_count)
		gpio_twl4030_write(REG_GPIO_CTRL, 0x0);

	mutex_unlock(&gpio_lock);
}

/* LED lines cannot be inputs, hence the -EINVAL past TWL4030_GPIO_MAX. */
static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
{
	return (offset < TWL4030_GPIO_MAX)
		? twl4030_set_gpio_direction(offset, 1)
		: -EINVAL;
}

/* Read a GPIO, or the cached LED enable bit for the two LED offsets;
 * errors are reported as 0. */
static int twl_get(struct gpio_chip *chip, unsigned offset)
{
	int status = 0;

	if (offset < TWL4030_GPIO_MAX)
		status = twl4030_get_gpio_datain(offset);
	else if (offset == TWL4030_GPIO_MAX)
		status = cached_leden & LEDEN_LEDAON;
	else
		status = cached_leden & LEDEN_LEDBON;
	return (status < 0) ? 0 : status;
}

/* Set the level first, then switch the line to output. */
static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
{
	if (offset < TWL4030_GPIO_MAX) {
		twl4030_set_gpio_dataout(offset, value);
		return twl4030_set_gpio_direction(offset, 0);
	} else {
		twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
		return 0;
	}
}

static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
{
	if (offset < TWL4030_GPIO_MAX)
		twl4030_set_gpio_dataout(offset, value);
	else
		twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
}

/* Map a GPIO offset to its IRQ; LED offsets and a missing irq_base
 * yield -EINVAL. */
static int twl_to_irq(struct gpio_chip *chip, unsigned offset)
{
	return (twl4030_gpio_irq_base && (offset < TWL4030_GPIO_MAX))
		? (twl4030_gpio_irq_base + offset)
		: -EINVAL;
}

static struct gpio_chip twl_gpiochip = {
	.label			= "twl4030",
	.owner			= THIS_MODULE,
	.request		= twl_request,
	.free			= twl_free,
	.direction_input	= twl_direction_in,
	.get			= twl_get,
	.direction_output	= twl_direction_out,
	.set			= twl_set,
	.to_irq			= twl_to_irq,
	.can_sleep		= 1,
};

/*----------------------------------------------------------------------*/

/* Program per-pin pullup/pulldown state from two 18-bit board masks
 * (2 control bits per GPIO, 4 GPIOs per register byte). */
static int __devinit gpio_twl4030_pulls(u32 ups, u32 downs)
{
	u8		message[6];
	unsigned	i, gpio_bit;

	/* For most pins, a pulldown was enabled by default.
	 * We should have data that's specific to this board.
	 */
	for (gpio_bit = 1, i = 1; i < 6; i++) {
		u8		bit_mask;
		unsigned	j;

		for (bit_mask = 0, j = 0; j < 8; j += 2, gpio_bit <<= 1) {
			if (ups & gpio_bit)
				bit_mask |= 1 << (j + 1);
			else if (downs & gpio_bit)
				bit_mask |= 1 << (j + 0);
		}
		message[i] = bit_mask;
	}

	return twl4030_i2c_write(TWL4030_MODULE_GPIO, message,
				REG_GPIOPUPDCTR1, 5);
}

/* Program the three debounce-enable registers from an 18-bit mask. */
static int __devinit gpio_twl4030_debounce(u32 debounce, u8 mmc_cd)
{
	u8		message[4];

	/* 30 msec of debouncing is always used for MMC card detect,
	 * and is optional for everything else.
	 */
	message[1] = (debounce & 0xff) | (mmc_cd & 0x03);
	debounce >>= 8;
	message[2] = (debounce & 0xff);
	debounce >>= 8;
	message[3] = (debounce & 0x03);

	return twl4030_i2c_write(TWL4030_MODULE_GPIO, message,
				REG_GPIO_DEBEN1, 3);
}

/* forward declaration: probe's error path calls remove */
static int gpio_twl4030_remove(struct platform_device *pdev);

static int __devinit gpio_twl4030_probe(struct platform_device *pdev)
{
	struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
	int ret;

	/* maybe setup IRQs */
	if (pdata->irq_base) {
		if (is_module()) {
			dev_err(&pdev->dev,
				"can't dispatch IRQs from modules\n");
			goto no_irqs;
		}
		ret = twl4030_sih_setup(TWL4030_MODULE_GPIO);
		if (ret < 0)
			return ret;
		WARN_ON(ret != pdata->irq_base);
		twl4030_gpio_irq_base = ret;
	}

no_irqs:
	/*
	 * NOTE:  boards may waste power if they don't set pullups
	 * and pulldowns correctly ... default for non-ULPI pins is
	 * pulldown, and some other pins may have external pullups
	 * or pulldowns.  Careful!
	 */
	ret = gpio_twl4030_pulls(pdata->pullups, pdata->pulldowns);
	if (ret)
		dev_dbg(&pdev->dev, "pullups %.05x %.05x --> %d\n",
				pdata->pullups, pdata->pulldowns, ret);

	ret = gpio_twl4030_debounce(pdata->debounce, pdata->mmc_cd);
	if (ret)
		dev_dbg(&pdev->dev, "debounce %.03x %.01x --> %d\n",
				pdata->debounce, pdata->mmc_cd, ret);

	twl_gpiochip.base = pdata->gpio_base;
	twl_gpiochip.ngpio = TWL4030_GPIO_MAX;
	twl_gpiochip.dev = &pdev->dev;

	/* NOTE: we assume VIBRA_CTL.VIBRA_EN, in MODULE_AUDIO_VOICE,
	 * is (still) clear if use_leds is set.
	 */
	if (pdata->use_leds)
		twl_gpiochip.ngpio += 2;

	ret = gpiochip_add(&twl_gpiochip);
	if (ret < 0) {
		dev_err(&pdev->dev,
				"could not register gpiochip, %d\n", ret);
		twl_gpiochip.ngpio = 0;
		gpio_twl4030_remove(pdev);
	} else if (pdata->setup) {
		int status;

		status = pdata->setup(&pdev->dev,
				pdata->gpio_base, TWL4030_GPIO_MAX);
		if (status)
			dev_dbg(&pdev->dev, "setup --> %d\n", status);
	}

	return ret;
}

static int __devexit gpio_twl4030_remove(struct platform_device *pdev)
{
	struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data;
	int status;

	if (pdata->teardown) {
		status = pdata->teardown(&pdev->dev,
				pdata->gpio_base, TWL4030_GPIO_MAX);
		if (status) {
			dev_dbg(&pdev->dev, "teardown --> %d\n", status);
			return status;
		}
	}

	status = gpiochip_remove(&twl_gpiochip);
	if (status < 0)
		return status;

	if (is_module())
		return 0;

	/* REVISIT no support yet for deregistering all the IRQs */
	WARN_ON(1);
	return -EIO;
}

/* Note:  this hardware lives inside an I2C-based multi-function device. */
MODULE_ALIAS("platform:twl4030_gpio");

static struct platform_driver gpio_twl4030_driver = {
	.driver.name	= "twl4030_gpio",
	.driver.owner	= THIS_MODULE,
	.probe		= gpio_twl4030_probe,
	.remove		= __devexit_p(gpio_twl4030_remove),
};

static int __init gpio_twl4030_init(void)
{
	return platform_driver_register(&gpio_twl4030_driver);
}
subsys_initcall(gpio_twl4030_init);

static void __exit gpio_twl4030_exit(void)
{
	platform_driver_unregister(&gpio_twl4030_driver);
}
module_exit(gpio_twl4030_exit);

MODULE_AUTHOR("Texas Instruments, Inc.");
MODULE_DESCRIPTION("GPIO interface for TWL4030");
MODULE_LICENSE("GPL");
ka6sox/nook_kernel
drivers/gpio/twl4030-gpio.c
C
gpl-2.0
13,253
/* * Routines to indentify caches on Intel CPU. * * Changes: * Venkatesh Pallipadi : Adding cache identification through cpuid(4) * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. */ #include <linux/init.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/compiler.h> #include <linux/cpu.h> #include <linux/sched.h> #include <linux/pci.h> #include <asm/processor.h> #include <linux/smp.h> #include <asm/amd_nb.h> #include <asm/smp.h> #define LVL_1_INST 1 #define LVL_1_DATA 2 #define LVL_2 3 #define LVL_3 4 #define LVL_TRACE 5 struct _cache_table { unsigned char descriptor; char cache_type; short size; }; #define MB(x) ((x) * 1024) /* All the cache descriptor types we care about (no TLB or trace cache entries) */ static const struct _cache_table __cpuinitconst cache_table[] = { { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */ { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */ { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */ { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */ { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */ { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */ { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x25, LVL_3, MB(2) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x29, LVL_3, MB(4) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */ { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */ { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored 
cache, 64 byte line size */ { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */ { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */ { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */ { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */ { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */ { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */ { 0x44, LVL_2, MB(1) }, /* 4-way set assoc, 32 byte line size */ { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */ { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */ { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */ { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */ { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */ { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */ { 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */ { 0x4c, LVL_3, MB(12) }, /* 12-way set assoc, 64 byte line size */ { 0x4d, LVL_3, MB(16) }, /* 16-way set assoc, 64 byte line size */ { 0x4e, LVL_2, MB(6) }, /* 24-way set assoc, 64 byte line size */ { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */ { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */ { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */ { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */ { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */ { 0x78, LVL_2, MB(1) }, /* 4-way set assoc, 64 byte line size */ { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7a, LVL_2, 
256 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */ { 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */ { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */ { 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */ { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */ { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */ { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */ { 0x85, LVL_2, MB(2) }, /* 8-way set assoc, 32 byte line size */ { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */ { 0x87, LVL_2, MB(1) }, /* 8-way set assoc, 64 byte line size */ { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */ { 0xd1, LVL_3, MB(1) }, /* 4-way set assoc, 64 byte line size */ { 0xd2, LVL_3, MB(2) }, /* 4-way set assoc, 64 byte line size */ { 0xd6, LVL_3, MB(1) }, /* 8-way set assoc, 64 byte line size */ { 0xd7, LVL_3, MB(2) }, /* 8-way set assoc, 64 byte line size */ { 0xd8, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */ { 0xdc, LVL_3, MB(2) }, /* 12-way set assoc, 64 byte line size */ { 0xdd, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */ { 0xde, LVL_3, MB(8) }, /* 12-way set assoc, 64 byte line size */ { 0xe2, LVL_3, MB(2) }, /* 16-way set assoc, 64 byte line size */ { 0xe3, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */ { 0xe4, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */ { 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */ { 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */ { 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */ { 0x00, 0, 0} }; enum _cache_type { CACHE_TYPE_NULL = 0, CACHE_TYPE_DATA = 1, CACHE_TYPE_INST = 2, CACHE_TYPE_UNIFIED = 3 }; union _cpuid4_leaf_eax { struct { enum _cache_type type:5; unsigned int level:3; unsigned 
int is_self_initializing:1; unsigned int is_fully_associative:1; unsigned int reserved:4; unsigned int num_threads_sharing:12; unsigned int num_cores_on_die:6; } split; u32 full; }; union _cpuid4_leaf_ebx { struct { unsigned int coherency_line_size:12; unsigned int physical_line_partition:10; unsigned int ways_of_associativity:10; } split; u32 full; }; union _cpuid4_leaf_ecx { struct { unsigned int number_of_sets:32; } split; u32 full; }; struct _cpuid4_info_regs { union _cpuid4_leaf_eax eax; union _cpuid4_leaf_ebx ebx; union _cpuid4_leaf_ecx ecx; unsigned long size; struct amd_northbridge *nb; }; struct _cpuid4_info { struct _cpuid4_info_regs base; DECLARE_BITMAP(shared_cpu_map, NR_CPUS); }; unsigned short num_cache_leaves; /* AMD doesn't have CPUID4. Emulate it here to report the same information to the user. This makes some assumptions about the machine: L2 not shared, no SMT etc. that is currently true on AMD CPUs. In theory the TLBs could be reported as fake type (they are in "dummy"). 
Maybe later */ union l1_cache { struct { unsigned line_size:8; unsigned lines_per_tag:8; unsigned assoc:8; unsigned size_in_kb:8; }; unsigned val; }; union l2_cache { struct { unsigned line_size:8; unsigned lines_per_tag:4; unsigned assoc:4; unsigned size_in_kb:16; }; unsigned val; }; union l3_cache { struct { unsigned line_size:8; unsigned lines_per_tag:4; unsigned assoc:4; unsigned res:2; unsigned size_encoded:14; }; unsigned val; }; static const unsigned short __cpuinitconst assocs[] = { [1] = 1, [2] = 2, [4] = 4, [6] = 8, [8] = 16, [0xa] = 32, [0xb] = 48, [0xc] = 64, [0xd] = 96, [0xe] = 128, [0xf] = 0xffff /* fully associative - no way to show this currently */ }; static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 }; static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 }; static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, union _cpuid4_leaf_ebx *ebx, union _cpuid4_leaf_ecx *ecx) { unsigned dummy; unsigned line_size, lines_per_tag, assoc, size_in_kb; union l1_cache l1i, l1d; union l2_cache l2; union l3_cache l3; union l1_cache *l1 = &l1d; eax->full = 0; ebx->full = 0; ecx->full = 0; cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val); cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val); switch (leaf) { case 1: l1 = &l1i; case 0: if (!l1->val) return; assoc = assocs[l1->assoc]; line_size = l1->line_size; lines_per_tag = l1->lines_per_tag; size_in_kb = l1->size_in_kb; break; case 2: if (!l2.val) return; assoc = assocs[l2.assoc]; line_size = l2.line_size; lines_per_tag = l2.lines_per_tag; /* cpu_data has errata corrections for K7 applied */ size_in_kb = __this_cpu_read(cpu_info.x86_cache_size); break; case 3: if (!l3.val) return; assoc = assocs[l3.assoc]; line_size = l3.line_size; lines_per_tag = l3.lines_per_tag; size_in_kb = l3.size_encoded * 512; if (boot_cpu_has(X86_FEATURE_AMD_DCM)) { size_in_kb = size_in_kb >> 1; assoc = assoc >> 1; } break; default: return; } eax->split.is_self_initializing = 1; 
eax->split.type = types[leaf]; eax->split.level = levels[leaf]; eax->split.num_threads_sharing = 0; eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1; if (assoc == 0xffff) eax->split.is_fully_associative = 1; ebx->split.coherency_line_size = line_size - 1; ebx->split.ways_of_associativity = assoc - 1; ebx->split.physical_line_partition = lines_per_tag - 1; ecx->split.number_of_sets = (size_in_kb * 1024) / line_size / (ebx->split.ways_of_associativity + 1) - 1; } struct _cache_attr { struct attribute attr; ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int); ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count, unsigned int); }; #ifdef CONFIG_AMD_NB /* * L3 cache descriptors */ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb) { struct amd_l3_cache *l3 = &nb->l3_cache; unsigned int sc0, sc1, sc2, sc3; u32 val = 0; pci_read_config_dword(nb->misc, 0x1C4, &val); /* calculate subcache sizes */ l3->subcaches[0] = sc0 = !(val & BIT(0)); l3->subcaches[1] = sc1 = !(val & BIT(4)); l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9)); l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13)); l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; } static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) { int node; /* only for L3, and not in virtualized environments */ if (index < 3) return; node = amd_get_nb_id(smp_processor_id()); this_leaf->nb = node_to_amd_nb(node); if (this_leaf->nb && !this_leaf->nb->l3_cache.indices) amd_calc_l3_indices(this_leaf->nb); } /* * check whether a slot used for disabling an L3 index is occupied. * @l3: L3 cache descriptor * @slot: slot number (0..1) * * @returns: the disabled index if used or negative value if slot free. 
*/ int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot) { unsigned int reg = 0; pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg); /* check whether this slot is activated already */ if (reg & (3UL << 30)) return reg & 0xfff; return -1; } static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf, unsigned int slot) { int index; if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) return -EINVAL; index = amd_get_l3_disable_slot(this_leaf->base.nb, slot); if (index >= 0) return sprintf(buf, "%d\n", index); return sprintf(buf, "FREE\n"); } #define SHOW_CACHE_DISABLE(slot) \ static ssize_t \ show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \ unsigned int cpu) \ { \ return show_cache_disable(this_leaf, buf, slot); \ } SHOW_CACHE_DISABLE(0) SHOW_CACHE_DISABLE(1) static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu, unsigned slot, unsigned long idx) { int i; idx |= BIT(30); /* * disable index in all 4 subcaches */ for (i = 0; i < 4; i++) { u32 reg = idx | (i << 20); if (!nb->l3_cache.subcaches[i]) continue; pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg); /* * We need to WBINVD on a core on the node containing the L3 * cache which indices we disable therefore a simple wbinvd() * is not sufficient. 
*/ wbinvd_on_cpu(cpu); reg |= BIT(31); pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg); } } /* * disable a L3 cache index by using a disable-slot * * @l3: L3 cache descriptor * @cpu: A CPU on the node containing the L3 cache * @slot: slot number (0..1) * @index: index to disable * * @return: 0 on success, error status on failure */ int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot, unsigned long index) { int ret = 0; /* check if @slot is already used or the index is already disabled */ ret = amd_get_l3_disable_slot(nb, slot); if (ret >= 0) return -EINVAL; if (index > nb->l3_cache.indices) return -EINVAL; /* check whether the other slot has disabled the same index already */ if (index == amd_get_l3_disable_slot(nb, !slot)) return -EINVAL; amd_l3_disable_index(nb, cpu, slot, index); return 0; } static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf, size_t count, unsigned int slot) { unsigned long val = 0; int cpu, err = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) return -EINVAL; cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map)); if (strict_strtoul(buf, 10, &val) < 0) return -EINVAL; err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val); if (err) { if (err == -EEXIST) printk(KERN_WARNING "L3 disable slot %d in use!\n", slot); return err; } return count; } #define STORE_CACHE_DISABLE(slot) \ static ssize_t \ store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \ const char *buf, size_t count, \ unsigned int cpu) \ { \ return store_cache_disable(this_leaf, buf, count, slot); \ } STORE_CACHE_DISABLE(0) STORE_CACHE_DISABLE(1) static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644, show_cache_disable_0, store_cache_disable_0); static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, show_cache_disable_1, store_cache_disable_1); static ssize_t show_subcaches(struct 
_cpuid4_info *this_leaf, char *buf, unsigned int cpu) { if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) return -EINVAL; return sprintf(buf, "%x\n", amd_get_subcaches(cpu)); } static ssize_t store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count, unsigned int cpu) { unsigned long val; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) return -EINVAL; if (strict_strtoul(buf, 16, &val) < 0) return -EINVAL; if (amd_set_subcaches(cpu, val)) return -EINVAL; return count; } static struct _cache_attr subcaches = __ATTR(subcaches, 0644, show_subcaches, store_subcaches); #else /* CONFIG_AMD_NB */ #define amd_init_l3_cache(x, y) #endif /* CONFIG_AMD_NB */ static int __cpuinit cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf) { union _cpuid4_leaf_eax eax; union _cpuid4_leaf_ebx ebx; union _cpuid4_leaf_ecx ecx; unsigned edx; if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { amd_cpuid4(index, &eax, &ebx, &ecx); amd_init_l3_cache(this_leaf, index); } else { cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx); } if (eax.split.type == CACHE_TYPE_NULL) return -EIO; /* better error ? 
*/ this_leaf->eax = eax; this_leaf->ebx = ebx; this_leaf->ecx = ecx; this_leaf->size = (ecx.split.number_of_sets + 1) * (ebx.split.coherency_line_size + 1) * (ebx.split.physical_line_partition + 1) * (ebx.split.ways_of_associativity + 1); return 0; } static int __cpuinit find_num_cache_leaves(void) { unsigned int eax, ebx, ecx, edx; union _cpuid4_leaf_eax cache_eax; int i = -1; do { ++i; /* Do cpuid(4) loop to find out num_cache_leaves */ cpuid_count(4, i, &eax, &ebx, &ecx, &edx); cache_eax.full = eax; } while (cache_eax.split.type != CACHE_TYPE_NULL); return i; } unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) { /* Cache sizes */ unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; #ifdef CONFIG_X86_HT unsigned int cpu = c->cpu_index; #endif if (c->cpuid_level > 3) { static int is_initialized; if (is_initialized == 0) { /* Init num_cache_leaves from boot CPU */ num_cache_leaves = find_num_cache_leaves(); is_initialized++; } /* * Whenever possible use cpuid(4), deterministic cache * parameters cpuid leaf to find the cache details */ for (i = 0; i < num_cache_leaves; i++) { struct _cpuid4_info_regs this_leaf; int retval; retval = cpuid4_cache_lookup_regs(i, &this_leaf); if (retval >= 0) { switch (this_leaf.eax.split.level) { case 1: if (this_leaf.eax.split.type == CACHE_TYPE_DATA) new_l1d = this_leaf.size/1024; else if (this_leaf.eax.split.type == CACHE_TYPE_INST) new_l1i = this_leaf.size/1024; break; case 2: new_l2 = this_leaf.size/1024; num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; index_msb = get_count_order(num_threads_sharing); l2_id = c->apicid >> index_msb; break; case 3: new_l3 = this_leaf.size/1024; num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; index_msb = get_count_order( 
num_threads_sharing); l3_id = c->apicid >> index_msb; break; default: break; } } } } /* * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for * trace cache */ if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) { /* supports eax=2 call */ int j, n; unsigned int regs[4]; unsigned char *dp = (unsigned char *)regs; int only_trace = 0; if (num_cache_leaves != 0 && c->x86 == 15) only_trace = 1; /* Number of times to iterate */ n = cpuid_eax(2) & 0xFF; for (i = 0 ; i < n ; i++) { cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]); /* If bit 31 is set, this is an unknown format */ for (j = 0 ; j < 3 ; j++) if (regs[j] & (1 << 31)) regs[j] = 0; /* Byte 0 is level count, not a descriptor */ for (j = 1 ; j < 16 ; j++) { unsigned char des = dp[j]; unsigned char k = 0; /* look up this descriptor in the table */ while (cache_table[k].descriptor != 0) { if (cache_table[k].descriptor == des) { if (only_trace && cache_table[k].cache_type != LVL_TRACE) break; switch (cache_table[k].cache_type) { case LVL_1_INST: l1i += cache_table[k].size; break; case LVL_1_DATA: l1d += cache_table[k].size; break; case LVL_2: l2 += cache_table[k].size; break; case LVL_3: l3 += cache_table[k].size; break; case LVL_TRACE: trace += cache_table[k].size; break; } break; } k++; } } } } if (new_l1d) l1d = new_l1d; if (new_l1i) l1i = new_l1i; if (new_l2) { l2 = new_l2; #ifdef CONFIG_X86_HT per_cpu(cpu_llc_id, cpu) = l2_id; #endif } if (new_l3) { l3 = new_l3; #ifdef CONFIG_X86_HT per_cpu(cpu_llc_id, cpu) = l3_id; #endif } c->x86_cache_size = l3 ? l3 : (l2 ? 
l2 : (l1i+l1d)); return l2; } #ifdef CONFIG_SYSFS /* pointer to _cpuid4_info array (for each cache leaf) */ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info); #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y])) #ifdef CONFIG_SMP static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) { struct _cpuid4_info *this_leaf, *sibling_leaf; unsigned long num_threads_sharing; int index_msb, i, sibling; struct cpuinfo_x86 *c = &cpu_data(cpu); if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { for_each_cpu(i, cpu_llc_shared_mask(cpu)) { if (!per_cpu(ici_cpuid4_info, i)) continue; this_leaf = CPUID4_INFO_IDX(i, index); for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) { if (!cpu_online(sibling)) continue; set_bit(sibling, this_leaf->shared_cpu_map); } } return; } this_leaf = CPUID4_INFO_IDX(cpu, index); num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing; if (num_threads_sharing == 1) cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map)); else { index_msb = get_count_order(num_threads_sharing); for_each_online_cpu(i) { if (cpu_data(i).apicid >> index_msb == c->apicid >> index_msb) { cpumask_set_cpu(i, to_cpumask(this_leaf->shared_cpu_map)); if (i != cpu && per_cpu(ici_cpuid4_info, i)) { sibling_leaf = CPUID4_INFO_IDX(i, index); cpumask_set_cpu(cpu, to_cpumask( sibling_leaf->shared_cpu_map)); } } } } } static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) { struct _cpuid4_info *this_leaf, *sibling_leaf; int sibling; this_leaf = CPUID4_INFO_IDX(cpu, index); for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) { sibling_leaf = CPUID4_INFO_IDX(sibling, index); cpumask_clear_cpu(cpu, to_cpumask(sibling_leaf->shared_cpu_map)); } } #else static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) { } static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) { } #endif static void __cpuinit free_cache_attributes(unsigned int cpu) { int 
i; for (i = 0; i < num_cache_leaves; i++) cache_remove_shared_cpu_map(cpu, i); kfree(per_cpu(ici_cpuid4_info, cpu)); per_cpu(ici_cpuid4_info, cpu) = NULL; } static void __cpuinit get_cpu_leaves(void *_retval) { int j, *retval = _retval, cpu = smp_processor_id(); /* Do cpuid and store the results */ for (j = 0; j < num_cache_leaves; j++) { struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j); *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base); if (unlikely(*retval < 0)) { int i; for (i = 0; i < j; i++) cache_remove_shared_cpu_map(cpu, i); break; } cache_shared_cpu_map_setup(cpu, j); } } static int __cpuinit detect_cache_attributes(unsigned int cpu) { int retval; if (num_cache_leaves == 0) return -ENOENT; per_cpu(ici_cpuid4_info, cpu) = kzalloc( sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL); if (per_cpu(ici_cpuid4_info, cpu) == NULL) return -ENOMEM; smp_call_function_single(cpu, get_cpu_leaves, &retval, true); if (retval) { kfree(per_cpu(ici_cpuid4_info, cpu)); per_cpu(ici_cpuid4_info, cpu) = NULL; } return retval; } #include <linux/kobject.h> #include <linux/sysfs.h> extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */ /* pointer to kobject for cpuX/cache */ static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject); struct _index_kobject { struct kobject kobj; unsigned int cpu; unsigned short index; }; /* pointer to array of kobjects for cpuX/cache/indexY */ static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject); #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y])) #define show_one_plus(file_name, object, val) \ static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \ unsigned int cpu) \ { \ return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \ } show_one_plus(level, base.eax.split.level, 0); show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1); show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1); 
show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1); show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1); static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu) { return sprintf(buf, "%luK\n", this_leaf->base.size / 1024); } static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, int type, char *buf) { ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf; int n = 0; if (len > 1) { const struct cpumask *mask; mask = to_cpumask(this_leaf->shared_cpu_map); n = type ? cpulist_scnprintf(buf, len-2, mask) : cpumask_scnprintf(buf, len-2, mask); buf[n++] = '\n'; buf[n] = '\0'; } return n; } static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf, unsigned int cpu) { return show_shared_cpu_map_func(leaf, 0, buf); } static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf, unsigned int cpu) { return show_shared_cpu_map_func(leaf, 1, buf); } static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu) { switch (this_leaf->base.eax.split.type) { case CACHE_TYPE_DATA: return sprintf(buf, "Data\n"); case CACHE_TYPE_INST: return sprintf(buf, "Instruction\n"); case CACHE_TYPE_UNIFIED: return sprintf(buf, "Unified\n"); default: return sprintf(buf, "Unknown\n"); } } #define to_object(k) container_of(k, struct _index_kobject, kobj) #define to_attr(a) container_of(a, struct _cache_attr, attr) #define define_one_ro(_name) \ static struct _cache_attr _name = \ __ATTR(_name, 0444, show_##_name, NULL) define_one_ro(level); define_one_ro(type); define_one_ro(coherency_line_size); define_one_ro(physical_line_partition); define_one_ro(ways_of_associativity); define_one_ro(number_of_sets); define_one_ro(size); define_one_ro(shared_cpu_map); define_one_ro(shared_cpu_list); static struct attribute *default_attrs[] = { &type.attr, &level.attr, &coherency_line_size.attr, &physical_line_partition.attr, &ways_of_associativity.attr, 
&number_of_sets.attr, &size.attr, &shared_cpu_map.attr, &shared_cpu_list.attr, NULL }; #ifdef CONFIG_AMD_NB static struct attribute ** __cpuinit amd_l3_attrs(void) { static struct attribute **attrs; int n; if (attrs) return attrs; n = sizeof (default_attrs) / sizeof (struct attribute *); if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) n += 2; if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) n += 1; attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL); if (attrs == NULL) return attrs = default_attrs; for (n = 0; default_attrs[n]; n++) attrs[n] = default_attrs[n]; if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) { attrs[n++] = &cache_disable_0.attr; attrs[n++] = &cache_disable_1.attr; } if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING)) attrs[n++] = &subcaches.attr; return attrs; } #endif static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) { struct _cache_attr *fattr = to_attr(attr); struct _index_kobject *this_leaf = to_object(kobj); ssize_t ret; ret = fattr->show ? fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), buf, this_leaf->cpu) : 0; return ret; } static ssize_t store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct _cache_attr *fattr = to_attr(attr); struct _index_kobject *this_leaf = to_object(kobj); ssize_t ret; ret = fattr->store ? 
fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index), buf, count, this_leaf->cpu) : 0; return ret; } static const struct sysfs_ops sysfs_ops = { .show = show, .store = store, }; static struct kobj_type ktype_cache = { .sysfs_ops = &sysfs_ops, .default_attrs = default_attrs, }; static struct kobj_type ktype_percpu_entry = { .sysfs_ops = &sysfs_ops, }; static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) { kfree(per_cpu(ici_cache_kobject, cpu)); kfree(per_cpu(ici_index_kobject, cpu)); per_cpu(ici_cache_kobject, cpu) = NULL; per_cpu(ici_index_kobject, cpu) = NULL; free_cache_attributes(cpu); } static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) { int err; if (num_cache_leaves == 0) return -ENOENT; err = detect_cache_attributes(cpu); if (err) return err; /* Allocate all required memory */ per_cpu(ici_cache_kobject, cpu) = kzalloc(sizeof(struct kobject), GFP_KERNEL); if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL)) goto err_out; per_cpu(ici_index_kobject, cpu) = kzalloc( sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL); if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL)) goto err_out; return 0; err_out: cpuid4_cache_sysfs_exit(cpu); return -ENOMEM; } static DECLARE_BITMAP(cache_dev_map, NR_CPUS); /* Add/Remove cache interface for CPU device */ static int __cpuinit cache_add_dev(struct sys_device * sys_dev) { unsigned int cpu = sys_dev->id; unsigned long i, j; struct _index_kobject *this_object; struct _cpuid4_info *this_leaf; int retval; retval = cpuid4_cache_sysfs_init(cpu); if (unlikely(retval < 0)) return retval; retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu), &ktype_percpu_entry, &sys_dev->kobj, "%s", "cache"); if (retval < 0) { cpuid4_cache_sysfs_exit(cpu); return retval; } for (i = 0; i < num_cache_leaves; i++) { this_object = INDEX_KOBJECT_PTR(cpu, i); this_object->cpu = cpu; this_object->index = i; this_leaf = CPUID4_INFO_IDX(cpu, i); ktype_cache.default_attrs = default_attrs; #ifdef 
CONFIG_AMD_NB if (this_leaf->base.nb) ktype_cache.default_attrs = amd_l3_attrs(); #endif retval = kobject_init_and_add(&(this_object->kobj), &ktype_cache, per_cpu(ici_cache_kobject, cpu), "index%1lu", i); if (unlikely(retval)) { for (j = 0; j < i; j++) kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj)); kobject_put(per_cpu(ici_cache_kobject, cpu)); cpuid4_cache_sysfs_exit(cpu); return retval; } kobject_uevent(&(this_object->kobj), KOBJ_ADD); } cpumask_set_cpu(cpu, to_cpumask(cache_dev_map)); kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD); return 0; } static void __cpuinit cache_remove_dev(struct sys_device * sys_dev) { unsigned int cpu = sys_dev->id; unsigned long i; if (per_cpu(ici_cpuid4_info, cpu) == NULL) return; if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map))) return; cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map)); for (i = 0; i < num_cache_leaves; i++) kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj)); kobject_put(per_cpu(ici_cache_kobject, cpu)); cpuid4_cache_sysfs_exit(cpu); } static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; struct sys_device *sys_dev; sys_dev = get_cpu_sysdev(cpu); switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: cache_add_dev(sys_dev); break; case CPU_DEAD: case CPU_DEAD_FROZEN: cache_remove_dev(sys_dev); break; } return NOTIFY_OK; } static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = { .notifier_call = cacheinfo_cpu_callback, }; static int __cpuinit cache_sysfs_init(void) { int i; if (num_cache_leaves == 0) return 0; for_each_online_cpu(i) { int err; struct sys_device *sys_dev = get_cpu_sysdev(i); err = cache_add_dev(sys_dev); if (err) return err; } register_hotcpu_notifier(&cacheinfo_cpu_notifier); return 0; } device_initcall(cache_sysfs_init); #endif
madprogrammer/linux-e8-rt
arch/x86/kernel/cpu/intel_cacheinfo.c
C
gpl-2.0
32,132
function test() { waitForExplicitFinish(); var win = openDialog(getBrowserURL(), "_blank", "chrome,all,dialog=no"); win.addEventListener("load", function () { win.removeEventListener("load", arguments.callee, false); win.content.addEventListener("focus", function () { win.content.removeEventListener("focus", arguments.callee, false); win.gBrowser.selectedTab.addEventListener("TabClose", function () { ok(false, "shouldn't have gotten the TabClose event for the last tab"); }, false); EventUtils.synthesizeKey("w", { accelKey: true }, win); ok(win.closed, "accel+w closed the window immediately"); finish(); }, false); }, false); }
freaktechnik/nightingale-hacking
dependencies/vendor/mozbrowser/base/content/test/browser_bug481560.js
JavaScript
gpl-2.0
705
/* * Copyright (c) 2008-2015 CoNWeT Lab., Universidad Politécnica de Madrid * * This file is part of Wirecloud Platform. * * Wirecloud Platform is free software: you can redistribute it and/or * modify it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * Wirecloud is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public * License for more details. * * You should have received a copy of the GNU Affero General Public License * along with Wirecloud Platform. If not, see * <http://www.gnu.org/licenses/>. * */ /*global StyledElements*/ (function () { "use strict"; /** * @experimental * */ var Separator = function Separator() { StyledElements.StyledElement.call(this, []); this.wrapperElement = document.createElement("hr"); }; Separator.prototype = new StyledElements.StyledElement(); Separator.prototype.destroy = function destroy() { if (StyledElements.Utils.XML.isElement(this.wrapperElement.parentNode)) { StyledElements.Utils.removeFromParent(this.wrapperElement); } StyledElements.StyledElement.prototype.destroy.call(this); }; StyledElements.Separator = Separator; })();
sixuanwang/SAMSaaS
wirecloud-develop/src/wirecloud/commons/static/js/StyledElements/Separator.js
JavaScript
gpl-2.0
1,544
<?php if (!defined('BASEPATH')) exit('No direct script access allowed'); /** * ExpressionEngine - by EllisLab * * @package ExpressionEngine * @author EllisLab Dev Team * @copyright Copyright (c) 2003 - 2014, EllisLab, Inc. * @license http://ellislab.com/expressionengine/user-guide/license.html * @link http://ellislab.com * @since Version 2.0 * @filesource */ // ------------------------------------------------------------------------ /** * ExpressionEngine Admin Model * * @package ExpressionEngine * @subpackage Core * @category Model * @author EllisLab Dev Team * @link http://ellislab.com */ class Admin_model extends CI_Model { /** * Get XML Encodings * * Returns an associative array of XML language keys and values * * @access public * @return array */ function get_xml_encodings() { static $encodings; if ( ! isset($encodings)) { $file = APPPATH.'config/languages.php'; if ( ! file_exists($file)) { return FALSE; } require_once $file; $encodings = array_flip($languages); unset($languages); } return $encodings; } // -------------------------------------------------------------------- /** * Get Installed Language Packs * * Returns an array of installed language packs * * @access public * @return array */ function get_installed_language_packs() { static $languages; if ( ! isset($languages)) { $this->load->helper('directory'); $source_dir = APPPATH.'language/'; if (($list = directory_map($source_dir, TRUE)) !== FALSE) { foreach ($list as $file) { if (is_dir($source_dir.$file) && $file[0] != '.') { $languages[$file] = ucfirst($file); } } ksort($languages); } } return $languages; } // -------------------------------------------------------------------- /** * Theme List * * Fetch installed CP Theme list * * @access public * @return array */ function get_cp_theme_list() { $this->load->library('user_agent'); static $themes; if ( ! 
isset($themes)) { $this->load->helper('directory'); if (($list = directory_map(PATH_CP_THEME, TRUE)) !== FALSE) { foreach ($list as $file) { if (is_dir(PATH_CP_THEME.$file) && $file[0] != '.') { if (substr($file, 0, 6) == 'mobile' && ! $this->agent->is_mobile()) { continue; } else { $themes[$file] = ucfirst(str_replace('_', ' ', $file)); } } } ksort($themes); } } return $themes; } // -------------------------------------------------------------------- /** * Template List * * Generates an array for the site template selection lists * * @access public * @param string * @return string */ function get_template_list() { static $templates; if ( ! isset($templates)) { $sql = "SELECT exp_template_groups.group_name, exp_templates.template_name FROM exp_template_groups, exp_templates WHERE exp_template_groups.group_id = exp_templates.group_id AND exp_template_groups.site_id = '".$this->db->escape_str($this->config->item('site_id'))."' "; $sql .= " ORDER BY exp_template_groups.group_name, exp_templates.template_name"; $query = $this->db->query($sql); foreach ($query->result_array() as $row) { $templates[$row['group_name'].'/'.$row['template_name']] = $row['group_name'].'/'.$row['template_name']; } } return $templates; } // -------------------------------------------------------------------- /** * Get HTML Buttons * * @access public * @param int member_id * @param bool if the default button set should be loaded if user has no buttons * @return object */ function get_html_buttons($member_id = 0, $load_default_buttons = TRUE) { $this->db->from('html_buttons'); $this->db->where('site_id', $this->config->item('site_id')); $this->db->where('member_id', $member_id); $this->db->order_by('tag_order'); $buttons = $this->db->get(); // count the buttons, if there aren't any, return the default button set if ($buttons->num_rows() == 0 AND $load_default_buttons === TRUE) { $this->db->from('html_buttons'); $this->db->where('site_id', $this->config->item('site_id')); 
$this->db->where('member_id', 0); $this->db->order_by('tag_order'); $buttons = $this->db->get(); } return $buttons; } // -------------------------------------------------------------------- /** * Delete HTML Button * * @access public * @return NULL */ function delete_html_button($id) { $this->db->from('html_buttons'); $this->db->where('site_id', $this->config->item('site_id')); $this->db->where('id', $id); $this->db->delete(); } // -------------------------------------------------------------------- /** * Update HTML Buttons * * @access public * @return object */ function update_html_buttons($member_id, $buttons, $remove_buttons = TRUE) { if ($remove_buttons != FALSE) { // remove all buttons for this member $this->db->where('site_id', $this->config->item('site_id')); $this->db->where('member_id', $member_id); $this->db->from('html_buttons'); $this->db->delete(); } // now add in the new buttons foreach ($buttons as $button) { $this->db->insert('html_buttons', $button); } } // -------------------------------------------------------------------- /** * Unique Upload Name * * @access public * @return boolean */ function unique_upload_name($name, $cur_name, $edit) { $this->db->where('site_id', $this->config->item('site_id')); $this->db->where('name', $name); $this->db->from('upload_prefs'); $count = $this->db->count_all_results(); if (($edit == FALSE OR ($edit == TRUE && $name != $cur_name)) && $count > 0) { return TRUE; } else { return FALSE; } } } /* End of file admin_model.php */ /* Location: ./system/expressionengine/models/admin_model.php */
runrobrun/ee-xc-site
xcrunner/expressionengine/models/admin_model.php
PHP
gpl-2.0
5,957
//Nav jQuery(document).ready(function ($){ jQuery('.navbar .nav > li > a, .footer-nav > li > a').click(function(){ jQuery.scrollTo( $(this).attr("href"), { duration: 1000, easing:'easeInOutExpo' }); return false; }); }); var tpj=jQuery; tpj.noConflict(); tpj(document).ready(function() { if (tpj.fn.cssOriginal!=undefined) tpj.fn.css = tpj.fn.cssOriginal; tpj('.banner').revolution( { delay:5000, startheight:520, startwidth:1170, hideThumbs:200, thumbWidth:100, // Thumb With and Height and Amount (only if navigation Tyope set to thumb !) thumbHeight:50, thumbAmount:5, navigationType:"bullet", // bullet, thumb, none navigationArrows:"nexttobullets", // nexttobullets, solo (old name verticalcentered), none navigationStyle:"round-old", // round,square,navbar,round-old,square-old,navbar-old, or any from the list in the docu (choose between 50+ different item), custom navigationHAlign:"center", // Vertical Align top,center,bottom navigationVAlign:"bottom", // Horizontal Align left,center,right navigationHOffset:0, navigationVOffset:-30, soloArrowLeftHalign:"left", soloArrowLeftValign:"top", soloArrowLeftHOffset:20, soloArrowLeftVOffset:0, soloArrowRightHalign:"right", soloArrowRightValign:"top", soloArrowRightHOffset:20, soloArrowRightVOffset:0, touchenabled:"on", // Enable Swipe Function : on/off onHoverStop:"off", // Stop Banner Timet at Hover on Slide on/off stopAtSlide:-1, // Stop Timer if Slide "x" has been Reached. If stopAfterLoops set to 0, then it stops already in the first Loop at slide X which defined. -1 means do not stop at any slide. stopAfterLoops has no sinn in this case. stopAfterLoops:-1, // Stop Timer if All slides has been played "x" times. 
IT will stop at THe slide which is defined via stopAtSlide:x, if set to -1 slide never stop automatic hideCaptionAtLimit:0, // It Defines if a caption should be shown under a Screen Resolution ( Basod on The Width of Browser) hideAllCaptionAtLilmit:0, // Hide all The Captions if Width of Browser is less then this value hideSliderAtLimit:0, // Hide the whole slider, and stop also functions if Width of Browser is less than this value shadow:1, //0 = no Shadow, 1,2,3 = 3 Different Art of Shadows (No Shadow in Fullwidth Version !) fullWidth:"off" // Turns On or Off the Fullwidth Image Centering in FullWidth Modus }); });
greenboxindonesia/greenbox.github.io
js/custom.js
JavaScript
gpl-2.0
2,483
<?php /** * Created on Dec 01, 2007 * * Copyright © 2007 Yuri Astrakhan <Firstname><Lastname>@gmail.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * http://www.gnu.org/copyleft/gpl.html * * @file */ /** * @ingroup API */ class ApiParse extends ApiBase { private $section, $text, $pstText = null; public function __construct( $main, $action ) { parent::__construct( $main, $action ); } public function execute() { // The data is hot but user-dependent, like page views, so we set vary cookies $this->getMain()->setCacheMode( 'anon-public-user-private' ); // Get parameters $params = $this->extractRequestParams(); $text = $params['text']; $title = $params['title']; $page = $params['page']; $pageid = $params['pageid']; $oldid = $params['oldid']; if ( !is_null( $page ) && ( !is_null( $text ) || $title != 'API' ) ) { $this->dieUsage( 'The page parameter cannot be used together with the text and title parameters', 'params' ); } $prop = array_flip( $params['prop'] ); if ( isset( $params['section'] ) ) { $this->section = $params['section']; } else { $this->section = false; } // The parser needs $wgTitle to be set, apparently the // $title parameter in Parser::parse isn't enough *sigh* // TODO: Does this still need $wgTitle? 
global $wgParser, $wgTitle, $wgLang; // Currently unnecessary, code to act as a safeguard against any change in current behaviour of uselang breaks $oldLang = null; if ( isset( $params['uselang'] ) && $params['uselang'] != $wgLang->getCode() ) { $oldLang = $wgLang; // Backup wgLang $wgLang = Language::factory( $params['uselang'] ); } $popts = ParserOptions::newFromContext( $this->getContext() ); $popts->setTidy( true ); $popts->enableLimitReport( !$params['disablepp'] ); $redirValues = null; // Return result $result = $this->getResult(); if ( !is_null( $oldid ) || !is_null( $pageid ) || !is_null( $page ) ) { if ( !is_null( $oldid ) ) { // Don't use the parser cache $rev = Revision::newFromID( $oldid ); if ( !$rev ) { $this->dieUsage( "There is no revision ID $oldid", 'missingrev' ); } if ( !$rev->userCan( Revision::DELETED_TEXT, $this->getUser() ) ) { $this->dieUsage( "You don't have permission to view deleted revisions", 'permissiondenied' ); } $titleObj = $rev->getTitle(); $wgTitle = $titleObj; // If for some reason the "oldid" is actually the current revision, it may be cached if ( $titleObj->getLatestRevID() === intval( $oldid ) ) { // May get from/save to parser cache $p_result = $this->getParsedSectionOrText( $titleObj, $popts, $pageid, isset( $prop['wikitext'] ) ) ; } else { // This is an old revision, so get the text differently $this->text = $rev->getText( Revision::FOR_THIS_USER, $this->getUser() ); if ( $this->section !== false ) { $this->text = $this->getSectionText( $this->text, 'r' . $rev->getId() ); } // Should we save old revision parses to the parser cache? 
$p_result = $wgParser->parse( $this->text, $titleObj, $popts ); } } else { // Not $oldid, but $pageid or $page if ( $params['redirects'] ) { $reqParams = array( 'action' => 'query', 'redirects' => '', ); if ( !is_null ( $pageid ) ) { $reqParams['pageids'] = $pageid; } else { // $page $reqParams['titles'] = $page; } $req = new FauxRequest( $reqParams ); $main = new ApiMain( $req ); $main->execute(); $data = $main->getResultData(); $redirValues = isset( $data['query']['redirects'] ) ? $data['query']['redirects'] : array(); $to = $page; foreach ( (array)$redirValues as $r ) { $to = $r['to']; } $titleObj = Title::newFromText( $to ); } else { if ( !is_null ( $pageid ) ) { $reqParams['pageids'] = $pageid; $titleObj = Title::newFromID( $pageid ); } else { // $page $to = $page; $titleObj = Title::newFromText( $to ); } } if ( !is_null ( $pageid ) ) { if ( !$titleObj ) { // Still throw nosuchpageid error if pageid was provided $this->dieUsageMsg( array( 'nosuchpageid', $pageid ) ); } } elseif ( !$titleObj || !$titleObj->exists() ) { $this->dieUsage( "The page you specified doesn't exist", 'missingtitle' ); } $wgTitle = $titleObj; if ( isset( $prop['revid'] ) ) { $oldid = $titleObj->getLatestRevID(); } // Potentially cached $p_result = $this->getParsedSectionOrText( $titleObj, $popts, $pageid, isset( $prop['wikitext'] ) ) ; } } else { // Not $oldid, $pageid, $page. Hence based on $text if ( is_null( $text ) ) { $this->dieUsage( 'The text parameter should be passed with the title parameter. 
Should you be using the "page" parameter instead?', 'params' ); } $this->text = $text; $titleObj = Title::newFromText( $title ); if ( !$titleObj ) { $this->dieUsageMsg( array( 'invalidtitle', $title ) ); } $wgTitle = $titleObj; if ( $this->section !== false ) { $this->text = $this->getSectionText( $this->text, $titleObj->getText() ); } if ( $params['pst'] || $params['onlypst'] ) { $this->pstText = $wgParser->preSaveTransform( $this->text, $titleObj, $this->getUser(), $popts ); } if ( $params['onlypst'] ) { // Build a result and bail out $result_array = array(); $result_array['text'] = array(); $result->setContent( $result_array['text'], $this->pstText ); if ( isset( $prop['wikitext'] ) ) { $result_array['wikitext'] = array(); $result->setContent( $result_array['wikitext'], $this->text ); } $result->addValue( null, $this->getModuleName(), $result_array ); return; } // Not cached (save or load) $p_result = $wgParser->parse( $params['pst'] ? $this->pstText : $this->text, $titleObj, $popts ); } $result_array = array(); $result_array['title'] = $titleObj->getPrefixedText(); if ( !is_null( $oldid ) ) { $result_array['revid'] = intval( $oldid ); } if ( $params['redirects'] && !is_null( $redirValues ) ) { $result_array['redirects'] = $redirValues; } if ( isset( $prop['text'] ) ) { $result_array['text'] = array(); $result->setContent( $result_array['text'], $p_result->getText() ); } if ( !is_null( $params['summary'] ) ) { $result_array['parsedsummary'] = array(); $result->setContent( $result_array['parsedsummary'], Linker::formatComment( $params['summary'], $titleObj ) ); } if ( isset( $prop['langlinks'] ) ) { $result_array['langlinks'] = $this->formatLangLinks( $p_result->getLanguageLinks() ); } if ( isset( $prop['languageshtml'] ) ) { $languagesHtml = $this->languagesHtml( $p_result->getLanguageLinks() ); $result_array['languageshtml'] = array(); $result->setContent( $result_array['languageshtml'], $languagesHtml ); } if ( isset( $prop['categories'] ) ) { 
$result_array['categories'] = $this->formatCategoryLinks( $p_result->getCategories() ); } if ( isset( $prop['categorieshtml'] ) ) { $categoriesHtml = $this->categoriesHtml( $p_result->getCategories() ); $result_array['categorieshtml'] = array(); $result->setContent( $result_array['categorieshtml'], $categoriesHtml ); } if ( isset( $prop['links'] ) ) { $result_array['links'] = $this->formatLinks( $p_result->getLinks() ); } if ( isset( $prop['templates'] ) ) { $result_array['templates'] = $this->formatLinks( $p_result->getTemplates() ); } if ( isset( $prop['images'] ) ) { $result_array['images'] = array_keys( $p_result->getImages() ); } if ( isset( $prop['externallinks'] ) ) { $result_array['externallinks'] = array_keys( $p_result->getExternalLinks() ); } if ( isset( $prop['sections'] ) ) { $result_array['sections'] = $p_result->getSections(); } if ( isset( $prop['displaytitle'] ) ) { $result_array['displaytitle'] = $p_result->getDisplayTitle() ? $p_result->getDisplayTitle() : $titleObj->getPrefixedText(); } if ( isset( $prop['headitems'] ) || isset( $prop['headhtml'] ) ) { $context = $this->getContext(); $context->setTitle( $titleObj ); $context->getOutput()->addParserOutputNoText( $p_result ); if ( isset( $prop['headitems'] ) ) { $headItems = $this->formatHeadItems( $p_result->getHeadItems() ); $css = $this->formatCss( $context->getOutput()->buildCssLinksArray() ); $scripts = array( $context->getOutput()->getHeadScripts() ); $result_array['headitems'] = array_merge( $headItems, $css, $scripts ); } if ( isset( $prop['headhtml'] ) ) { $result_array['headhtml'] = array(); $result->setContent( $result_array['headhtml'], $context->getOutput()->headElement( $context->getSkin() ) ); } } if ( isset( $prop['iwlinks'] ) ) { $result_array['iwlinks'] = $this->formatIWLinks( $p_result->getInterwikiLinks() ); } if ( isset( $prop['wikitext'] ) ) { $result_array['wikitext'] = array(); $result->setContent( $result_array['wikitext'], $this->text ); if ( !is_null( $this->pstText ) ) 
{ $result_array['psttext'] = array(); $result->setContent( $result_array['psttext'], $this->pstText ); } } $result_mapping = array( 'redirects' => 'r', 'langlinks' => 'll', 'categories' => 'cl', 'links' => 'pl', 'templates' => 'tl', 'images' => 'img', 'externallinks' => 'el', 'iwlinks' => 'iw', 'sections' => 's', 'headitems' => 'hi', ); $this->setIndexedTagNames( $result_array, $result_mapping ); $result->addValue( null, $this->getModuleName(), $result_array ); if ( !is_null( $oldLang ) ) { $wgLang = $oldLang; // Reset $wgLang to $oldLang } } /** * @param $titleObj Title * @param $popts ParserOptions * @param $pageId Int * @param $getWikitext Bool * @return ParserOutput */ private function getParsedSectionOrText( $titleObj, $popts, $pageId = null, $getWikitext = false ) { global $wgParser; $page = WikiPage::factory( $titleObj ); if ( $this->section !== false ) { $this->text = $this->getSectionText( $page->getRawText(), !is_null( $pageId ) ? 'page id ' . $pageId : $titleObj->getText() ); // Not cached (save or load) return $wgParser->parse( $this->text, $titleObj, $popts ); } else { // Try the parser cache first // getParserOutput will save to Parser cache if able $pout = $page->getParserOutput( $popts ); if ( $getWikitext ) { $this->text = $page->getRawText(); } return $pout; } } private function getSectionText( $text, $what ) { global $wgParser; // Not cached (save or load) $text = $wgParser->getSection( $text, $this->section, false ); if ( $text === false ) { $this->dieUsage( "There is no section {$this->section} in " . 
$what, 'nosuchsection' ); } return $text; } private function formatLangLinks( $links ) { $result = array(); foreach ( $links as $link ) { $entry = array(); $bits = explode( ':', $link, 2 ); $title = Title::newFromText( $link ); $entry['lang'] = $bits[0]; if ( $title ) { $entry['url'] = wfExpandUrl( $title->getFullURL(), PROTO_CURRENT ); } $this->getResult()->setContent( $entry, $bits[1] ); $result[] = $entry; } return $result; } private function formatCategoryLinks( $links ) { $result = array(); foreach ( $links as $link => $sortkey ) { $entry = array(); $entry['sortkey'] = $sortkey; $this->getResult()->setContent( $entry, $link ); $result[] = $entry; } return $result; } private function categoriesHtml( $categories ) { $context = $this->getContext(); $context->getOutput()->addCategoryLinks( $categories ); return $context->getSkin()->getCategories(); } /** * @deprecated since 1.18 No modern skin generates language links this way, please use language links * data to generate your own HTML. * @param $languages array * @return string */ private function languagesHtml( $languages ) { wfDeprecated( __METHOD__, '1.18' ); global $wgContLang, $wgHideInterlanguageLinks; if ( $wgHideInterlanguageLinks || count( $languages ) == 0 ) { return ''; } $s = htmlspecialchars( wfMsg( 'otherlanguages' ) . wfMsg( 'colon-separator' ) ); $langs = array(); foreach ( $languages as $l ) { $nt = Title::newFromText( $l ); $text = Language::fetchLanguageName( $nt->getInterwiki() ); $langs[] = Html::element( 'a', array( 'href' => $nt->getFullURL(), 'title' => $nt->getText(), 'class' => "external" ), $text == '' ? 
$l : $text ); } $s .= implode( htmlspecialchars( wfMsgExt( 'pipe-separator', 'escapenoentities' ) ), $langs ); if ( $wgContLang->isRTL() ) { $s = Html::rawElement( 'span', array( 'dir' => "LTR" ), $s ); } return $s; } private function formatLinks( $links ) { $result = array(); foreach ( $links as $ns => $nslinks ) { foreach ( $nslinks as $title => $id ) { $entry = array(); $entry['ns'] = $ns; $this->getResult()->setContent( $entry, Title::makeTitle( $ns, $title )->getFullText() ); if ( $id != 0 ) { $entry['exists'] = ''; } $result[] = $entry; } } return $result; } private function formatIWLinks( $iw ) { $result = array(); foreach ( $iw as $prefix => $titles ) { foreach ( array_keys( $titles ) as $title ) { $entry = array(); $entry['prefix'] = $prefix; $title = Title::newFromText( "{$prefix}:{$title}" ); if ( $title ) { $entry['url'] = wfExpandUrl( $title->getFullURL(), PROTO_CURRENT ); } $this->getResult()->setContent( $entry, $title->getFullText() ); $result[] = $entry; } } return $result; } private function formatHeadItems( $headItems ) { $result = array(); foreach ( $headItems as $tag => $content ) { $entry = array(); $entry['tag'] = $tag; $this->getResult()->setContent( $entry, $content ); $result[] = $entry; } return $result; } private function formatCss( $css ) { $result = array(); foreach ( $css as $file => $link ) { $entry = array(); $entry['file'] = $file; $this->getResult()->setContent( $entry, $link ); $result[] = $entry; } return $result; } private function setIndexedTagNames( &$array, $mapping ) { foreach ( $mapping as $key => $name ) { if ( isset( $array[$key] ) ) { $this->getResult()->setIndexedTagName( $array[$key], $name ); } } } public function getAllowedParams() { return array( 'title' => array( ApiBase::PARAM_DFLT => 'API', ), 'text' => null, 'summary' => null, 'page' => null, 'pageid' => array( ApiBase::PARAM_TYPE => 'integer', ), 'redirects' => false, 'oldid' => array( ApiBase::PARAM_TYPE => 'integer', ), 'prop' => array( ApiBase::PARAM_DFLT 
=> 'text|langlinks|categories|links|templates|images|externallinks|sections|revid|displaytitle', ApiBase::PARAM_ISMULTI => true, ApiBase::PARAM_TYPE => array( 'text', 'langlinks', 'languageshtml', 'categories', 'categorieshtml', 'links', 'templates', 'images', 'externallinks', 'sections', 'revid', 'displaytitle', 'headitems', 'headhtml', 'iwlinks', 'wikitext', ) ), 'pst' => false, 'onlypst' => false, 'uselang' => null, 'section' => null, 'disablepp' => false, ); } public function getParamDescription() { $p = $this->getModulePrefix(); return array( 'text' => 'Wikitext to parse', 'summary' => 'Summary to parse', 'redirects' => "If the {$p}page or the {$p}pageid parameter is set to a redirect, resolve it", 'title' => 'Title of page the text belongs to', 'page' => "Parse the content of this page. Cannot be used together with {$p}text and {$p}title", 'pageid' => "Parse the content of this page. Overrides {$p}page", 'oldid' => "Parse the content of this revision. Overrides {$p}page and {$p}pageid", 'prop' => array( 'Which pieces of information to get', ' text - Gives the parsed text of the wikitext', ' langlinks - Gives the language links in the parsed wikitext', ' categories - Gives the categories in the parsed wikitext', ' categorieshtml - Gives the HTML version of the categories', ' languageshtml - Gives the HTML version of the language links', ' links - Gives the internal links in the parsed wikitext', ' templates - Gives the templates in the parsed wikitext', ' images - Gives the images in the parsed wikitext', ' externallinks - Gives the external links in the parsed wikitext', ' sections - Gives the sections in the parsed wikitext', ' revid - Adds the revision ID of the parsed page', ' displaytitle - Adds the title of the parsed wikitext', ' headitems - Gives items to put in the <head> of the page', ' headhtml - Gives parsed <head> of the page', ' iwlinks - Gives interwiki links in the parsed wikitext', ' wikitext - Gives the original wikitext that was parsed', ), 
'pst' => array( 'Do a pre-save transform on the input before parsing it', 'Ignored if page, pageid or oldid is used' ), 'onlypst' => array( 'Do a pre-save transform (PST) on the input, but don\'t parse it', 'Returns the same wikitext, after a PST has been applied. Ignored if page, pageid or oldid is used' ), 'uselang' => 'Which language to parse the request in', 'section' => 'Only retrieve the content of this section number', 'disablepp' => 'Disable the PP Report from the parser output', ); } public function getDescription() { return 'Parses wikitext and returns parser output'; } public function getPossibleErrors() { return array_merge( parent::getPossibleErrors(), array( array( 'code' => 'params', 'info' => 'The page parameter cannot be used together with the text and title parameters' ), array( 'code' => 'params', 'info' => 'The text parameter should be passed with the title parameter. Should you be using the "page" parameter instead?' ), array( 'code' => 'missingrev', 'info' => 'There is no revision ID oldid' ), array( 'code' => 'permissiondenied', 'info' => 'You don\'t have permission to view deleted revisions' ), array( 'code' => 'missingtitle', 'info' => 'The page you specified doesn\'t exist' ), array( 'code' => 'nosuchsection', 'info' => 'There is no section sectionnumber in page' ), array( 'nosuchpageid' ), array( 'invalidtitle', 'title' ), ) ); } public function getExamples() { return array( 'api.php?action=parse&text={{Project:Sandbox}}' ); } public function getHelpUrls() { return 'https://www.mediawiki.org/wiki/API:Parsing_wikitext#parse'; } public function getVersion() { return __CLASS__ . ': $Id$'; } }
brion/MediaWiki
includes/api/ApiParse.php
PHP
gpl-2.0
19,495
package org.dolphinemu.dolphinemu.ui.settings; import org.dolphinemu.dolphinemu.NativeLibrary; import org.dolphinemu.dolphinemu.R; import org.dolphinemu.dolphinemu.model.settings.BooleanSetting; import org.dolphinemu.dolphinemu.model.settings.IntSetting; import org.dolphinemu.dolphinemu.model.settings.Setting; import org.dolphinemu.dolphinemu.model.settings.SettingSection; import org.dolphinemu.dolphinemu.model.settings.StringSetting; import org.dolphinemu.dolphinemu.model.settings.view.CheckBoxSetting; import org.dolphinemu.dolphinemu.model.settings.view.HeaderSetting; import org.dolphinemu.dolphinemu.model.settings.view.InputBindingSetting; import org.dolphinemu.dolphinemu.model.settings.view.SettingsItem; import org.dolphinemu.dolphinemu.model.settings.view.SingleChoiceSetting; import org.dolphinemu.dolphinemu.model.settings.view.SliderSetting; import org.dolphinemu.dolphinemu.model.settings.view.SubmenuSetting; import org.dolphinemu.dolphinemu.utils.EGLHelper; import org.dolphinemu.dolphinemu.utils.SettingsFile; import java.util.ArrayList; import java.util.HashMap; public final class SettingsFragmentPresenter { private SettingsFragmentView mView; private String mMenuTag; private ArrayList<HashMap<String, SettingSection>> mSettings; private ArrayList<SettingsItem> mSettingsList; private int mControllerNumber; private int mControllerType; public SettingsFragmentPresenter(SettingsFragmentView view) { mView = view; } public void onCreate(String menuTag) { if (menuTag.startsWith(SettingsFile.KEY_GCPAD_TYPE)) { mMenuTag = SettingsFile.KEY_GCPAD_TYPE; mControllerNumber = Character.getNumericValue(menuTag.charAt(menuTag.length() - 2)); mControllerType = Character.getNumericValue(menuTag.charAt(menuTag.length() - 1)); } else if (menuTag.startsWith(SettingsFile.SECTION_WIIMOTE) && !menuTag.equals(SettingsFile.FILE_NAME_WIIMOTE)) { mMenuTag = SettingsFile.SECTION_WIIMOTE; mControllerNumber = Character.getNumericValue(menuTag.charAt(menuTag.length() - 1)) + 3; } else if 
(menuTag.startsWith(SettingsFile.KEY_WIIMOTE_EXTENSION)) { mMenuTag = SettingsFile.KEY_WIIMOTE_EXTENSION; mControllerNumber = Character.getNumericValue(menuTag.charAt(menuTag.length() - 2)) + 3; mControllerType = Character.getNumericValue(menuTag.charAt(menuTag.length() - 1)); } else { mMenuTag = menuTag; } } public void onViewCreated(ArrayList<HashMap<String, SettingSection>> settings) { setSettings(settings); } /** * If the screen is rotated, the Activity will forget the settings map. This fragment * won't, though; so rather than have the Activity reload from disk, have the fragment pass * the settings map back to the Activity. */ public void onAttach() { if (mSettings != null) { mView.passSettingsToActivity(mSettings); } } public void putSetting(Setting setting) { mSettings.get(setting.getFile()).get(setting.getSection()).putSetting(setting); } public void loadDefaultSettings() { loadSettingsList(); } public void setSettings(ArrayList<HashMap<String, SettingSection>> settings) { if (mSettingsList == null && settings != null) { mSettings = settings; loadSettingsList(); } else { mView.showSettingsList(mSettingsList); } } private void loadSettingsList() { ArrayList<SettingsItem> sl = new ArrayList<>(); switch (mMenuTag) { case SettingsFile.FILE_NAME_DOLPHIN: addCoreSettings(sl); break; case SettingsFile.FILE_NAME_GFX: addGraphicsSettings(sl); break; case SettingsFile.FILE_NAME_GCPAD: addGcPadSettings(sl); break; case SettingsFile.FILE_NAME_WIIMOTE: addWiimoteSettings(sl); break; case SettingsFile.SECTION_GFX_ENHANCEMENTS: addEnhanceSettings(sl); break; case SettingsFile.SECTION_GFX_HACKS: addHackSettings(sl); break; case SettingsFile.KEY_GCPAD_TYPE: addGcPadSubSettings(sl, mControllerNumber, mControllerType); break; case SettingsFile.SECTION_WIIMOTE: addWiimoteSubSettings(sl, mControllerNumber); break; case SettingsFile.KEY_WIIMOTE_EXTENSION: addExtensionTypeSettings(sl, mControllerNumber, mControllerType); break; case SettingsFile.SECTION_STEREOSCOPY: 
addStereoSettings(sl); break; default: mView.showToastMessage("Unimplemented menu."); return; } mSettingsList = sl; mView.showSettingsList(mSettingsList); } private void addCoreSettings(ArrayList<SettingsItem> sl) { Setting cpuCore = null; Setting dualCore = null; Setting overclockEnable = null; Setting overclock = null; Setting continuousScan = null; Setting wiimoteSpeaker = null; Setting audioStretch = null; if (!mSettings.get(SettingsFile.SETTINGS_DOLPHIN).isEmpty()) { cpuCore = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_CORE).getSetting(SettingsFile.KEY_CPU_CORE); dualCore = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_CORE).getSetting(SettingsFile.KEY_DUAL_CORE); overclockEnable = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_CORE).getSetting(SettingsFile.KEY_OVERCLOCK_ENABLE); overclock = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_CORE).getSetting(SettingsFile.KEY_OVERCLOCK_PERCENT); continuousScan = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_CORE).getSetting(SettingsFile.KEY_WIIMOTE_SCAN); wiimoteSpeaker = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_CORE).getSetting(SettingsFile.KEY_WIIMOTE_SPEAKER); audioStretch = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_CORE).getSetting(SettingsFile.KEY_AUDIO_STRETCH); } else { mSettings.get(SettingsFile.SETTINGS_DOLPHIN).put(SettingsFile.SECTION_CORE, new SettingSection(SettingsFile.SECTION_CORE)); mView.passSettingsToActivity(mSettings); } // TODO: Having different emuCoresEntries/emuCoresValues for each architecture is annoying. // The proper solution would be to have one emuCoresEntries and one emuCoresValues // and exclude the values that aren't present in PowerPC::AvailableCPUCores(). 
int defaultCpuCore = NativeLibrary.DefaultCPUCore(); int emuCoresEntries; int emuCoresValues; if (defaultCpuCore == 1) // x86-64 { emuCoresEntries = R.array.emuCoresEntriesX86_64; emuCoresValues = R.array.emuCoresValuesX86_64; } else if (defaultCpuCore == 4) // AArch64 { emuCoresEntries = R.array.emuCoresEntriesARM64; emuCoresValues = R.array.emuCoresValuesARM64; } else { emuCoresEntries = R.array.emuCoresEntriesGeneric; emuCoresValues = R.array.emuCoresValuesGeneric; } sl.add(new SingleChoiceSetting(SettingsFile.KEY_CPU_CORE, SettingsFile.SECTION_CORE, SettingsFile.SETTINGS_DOLPHIN, R.string.cpu_core, 0, emuCoresEntries, emuCoresValues, defaultCpuCore, cpuCore)); sl.add(new CheckBoxSetting(SettingsFile.KEY_DUAL_CORE, SettingsFile.SECTION_CORE, SettingsFile.SETTINGS_DOLPHIN, R.string.dual_core, R.string.dual_core_descrip, true, dualCore)); sl.add(new CheckBoxSetting(SettingsFile.KEY_OVERCLOCK_ENABLE, SettingsFile.SECTION_CORE, SettingsFile.SETTINGS_DOLPHIN, R.string.overclock_enable, R.string.overclock_enable_description, false, overclockEnable)); sl.add(new SliderSetting(SettingsFile.KEY_OVERCLOCK_PERCENT, SettingsFile.SECTION_CORE, SettingsFile.SETTINGS_DOLPHIN, R.string.overclock_title, 0, 400, "%", 100, overclock)); sl.add(new CheckBoxSetting(SettingsFile.KEY_WIIMOTE_SCAN, SettingsFile.SECTION_CORE, SettingsFile.SETTINGS_DOLPHIN, R.string.wiimote_scanning, R.string.wiimote_scanning_description, true, continuousScan)); sl.add(new CheckBoxSetting(SettingsFile.KEY_WIIMOTE_SPEAKER, SettingsFile.SECTION_CORE, SettingsFile.SETTINGS_DOLPHIN, R.string.wiimote_speaker, R.string.wiimote_speaker_description, true, wiimoteSpeaker)); sl.add(new CheckBoxSetting(SettingsFile.KEY_AUDIO_STRETCH, SettingsFile.SECTION_CORE, SettingsFile.SETTINGS_DOLPHIN, R.string.audio_stretch, R.string.audio_stretch_description, false, audioStretch)); } private void addGcPadSettings(ArrayList<SettingsItem> sl) { if (!mSettings.get(SettingsFile.SETTINGS_DOLPHIN).isEmpty()) { for (int i = 0; i < 
4; i++) { // TODO This controller_0 + i business is quite the hack. It should work, but only if the definitions are kept together and in order. Setting gcPadSetting = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_CORE).getSetting(SettingsFile.KEY_GCPAD_TYPE + i); sl.add(new SingleChoiceSetting(SettingsFile.KEY_GCPAD_TYPE + i, SettingsFile.SECTION_CORE, SettingsFile.SETTINGS_DOLPHIN, R.string.controller_0 + i, 0, R.array.gcpadTypeEntries, R.array.gcpadTypeValues, 0, gcPadSetting)); } } } private void addWiimoteSettings(ArrayList<SettingsItem> sl) { if (!mSettings.get(SettingsFile.SETTINGS_WIIMOTE).isEmpty()) { for (int i = 1; i <= 4; i++) { // TODO This wiimote_0 + i business is quite the hack. It should work, but only if the definitions are kept together and in order. Setting wiimoteSetting = mSettings.get(SettingsFile.SETTINGS_WIIMOTE).get(SettingsFile.SECTION_WIIMOTE + i).getSetting(SettingsFile.KEY_WIIMOTE_TYPE); sl.add(new SingleChoiceSetting(SettingsFile.KEY_WIIMOTE_TYPE, SettingsFile.SECTION_WIIMOTE + i, SettingsFile.SETTINGS_WIIMOTE, R.string.wiimote_0 + i - 1, 0, R.array.wiimoteTypeEntries, R.array.wiimoteTypeValues, 0, wiimoteSetting)); } } } private void addGraphicsSettings(ArrayList<SettingsItem> sl) { IntSetting videoBackend = new IntSetting(SettingsFile.KEY_VIDEO_BACKEND_INDEX, SettingsFile.SECTION_CORE, SettingsFile.SETTINGS_DOLPHIN, getVideoBackendValue()); Setting showFps = null; if (!mSettings.get(SettingsFile.SETTINGS_GFX).isEmpty()) { showFps = mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_SETTINGS).getSetting(SettingsFile.KEY_SHOW_FPS); } else { mSettings.get(SettingsFile.SETTINGS_GFX).put(SettingsFile.SECTION_GFX_SETTINGS, new SettingSection(SettingsFile.SECTION_GFX_SETTINGS)); mSettings.get(SettingsFile.SETTINGS_GFX).put(SettingsFile.SECTION_GFX_ENHANCEMENTS, new SettingSection(SettingsFile.SECTION_GFX_ENHANCEMENTS)); mSettings.get(SettingsFile.SETTINGS_GFX).put(SettingsFile.SECTION_GFX_HACKS, 
new SettingSection(SettingsFile.SECTION_GFX_HACKS)); mView.passSettingsToActivity(mSettings); } if (mSettings.get(SettingsFile.SETTINGS_DOLPHIN).isEmpty()) { mSettings.get(SettingsFile.SETTINGS_DOLPHIN).put(SettingsFile.SECTION_CORE, new SettingSection(SettingsFile.SECTION_CORE)); mView.passSettingsToActivity(mSettings); } sl.add(new SingleChoiceSetting(SettingsFile.KEY_VIDEO_BACKEND_INDEX, SettingsFile.SECTION_CORE, SettingsFile.SETTINGS_DOLPHIN, R.string.video_backend, R.string.video_backend_descrip, R.array.videoBackendEntries, R.array.videoBackendValues, 0, videoBackend)); sl.add(new CheckBoxSetting(SettingsFile.KEY_SHOW_FPS, SettingsFile.SECTION_GFX_SETTINGS, SettingsFile.SETTINGS_GFX, R.string.show_fps, 0, true, showFps)); sl.add(new SubmenuSetting(null, null, R.string.enhancements, 0, SettingsFile.SECTION_GFX_ENHANCEMENTS)); sl.add(new SubmenuSetting(null, null, R.string.hacks, 0, SettingsFile.SECTION_GFX_HACKS)); } private void addEnhanceSettings(ArrayList<SettingsItem> sl) { int uberShaderModeValue = getUberShaderModeValue(); Setting resolution = mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_SETTINGS).getSetting(SettingsFile.KEY_INTERNAL_RES); Setting fsaa = mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_SETTINGS).getSetting(SettingsFile.KEY_FSAA); Setting anisotropic = mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_ENHANCEMENTS).getSetting(SettingsFile.KEY_ANISOTROPY); Setting efbScaledCopy = mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_HACKS).getSetting(SettingsFile.KEY_SCALED_EFB); Setting perPixel = mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_SETTINGS).getSetting(SettingsFile.KEY_PER_PIXEL); Setting forceFilter = mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_ENHANCEMENTS).getSetting(SettingsFile.KEY_FORCE_FILTERING); Setting disableFog = 
mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_SETTINGS).getSetting(SettingsFile.KEY_DISABLE_FOG); IntSetting uberShaderMode = new IntSetting(SettingsFile.KEY_UBERSHADER_MODE, SettingsFile.SECTION_GFX_SETTINGS, SettingsFile.SETTINGS_GFX, uberShaderModeValue); sl.add(new SingleChoiceSetting(SettingsFile.KEY_INTERNAL_RES, SettingsFile.SECTION_GFX_SETTINGS, SettingsFile.SETTINGS_GFX, R.string.internal_resolution, R.string.internal_resolution_descrip, R.array.internalResolutionEntries, R.array.internalResolutionValues, 0, resolution)); sl.add(new SingleChoiceSetting(SettingsFile.KEY_FSAA, SettingsFile.SECTION_GFX_SETTINGS, SettingsFile.SETTINGS_GFX, R.string.FSAA, R.string.FSAA_descrip, R.array.FSAAEntries, R.array.FSAAValues, 0, fsaa)); sl.add(new SingleChoiceSetting(SettingsFile.KEY_ANISOTROPY, SettingsFile.SECTION_GFX_ENHANCEMENTS, SettingsFile.SETTINGS_GFX, R.string.anisotropic_filtering, R.string.anisotropic_filtering_descrip, R.array.anisotropicFilteringEntries, R.array.anisotropicFilteringValues, 0, anisotropic)); // TODO // Setting shader = mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_ENHANCEMENTS).getSetting(SettingsFile.KEY_POST_SHADER) // sl.add(new SingleChoiceSetting(.getKey(), , R.string., R.string._descrip, R.array., R.array.)); sl.add(new CheckBoxSetting(SettingsFile.KEY_SCALED_EFB, SettingsFile.SECTION_GFX_HACKS, SettingsFile.SETTINGS_GFX, R.string.scaled_efb_copy, R.string.scaled_efb_copy_descrip, true, efbScaledCopy)); sl.add(new CheckBoxSetting(SettingsFile.KEY_PER_PIXEL, SettingsFile.SECTION_GFX_SETTINGS, SettingsFile.SETTINGS_GFX, R.string.per_pixel_lighting, R.string.per_pixel_lighting_descrip, false, perPixel)); sl.add(new CheckBoxSetting(SettingsFile.KEY_FORCE_FILTERING, SettingsFile.SECTION_GFX_ENHANCEMENTS, SettingsFile.SETTINGS_GFX, R.string.force_texture_filtering, R.string.force_texture_filtering_descrip, false, forceFilter)); sl.add(new CheckBoxSetting(SettingsFile.KEY_DISABLE_FOG, 
SettingsFile.SECTION_GFX_SETTINGS, SettingsFile.SETTINGS_GFX, R.string.disable_fog, R.string.disable_fog_descrip, false, disableFog)); sl.add(new SingleChoiceSetting(SettingsFile.KEY_UBERSHADER_MODE, SettingsFile.SECTION_GFX_SETTINGS, SettingsFile.SETTINGS_GFX, R.string.ubershader_mode, R.string.ubershader_mode_descrip, R.array.uberShaderModeEntries, R.array.uberShaderModeValues, 0, uberShaderMode)); /* Check if we support stereo If we support desktop GL then we must support at least OpenGL 3.2 If we only support OpenGLES then we need both OpenGLES 3.1 and AEP */ EGLHelper helper = new EGLHelper(EGLHelper.EGL_OPENGL_ES2_BIT); if ((helper.supportsOpenGL() && helper.GetVersion() >= 320) || (helper.supportsGLES3() && helper.GetVersion() >= 310 && helper.SupportsExtension("GL_ANDROID_extension_pack_es31a"))) { sl.add(new SubmenuSetting(SettingsFile.KEY_STEREO_MODE, null, R.string.stereoscopy, R.string.stereoscopy_descrip, SettingsFile.SECTION_STEREOSCOPY)); } } private void addHackSettings(ArrayList<SettingsItem> sl) { boolean skipEFBValue = getInvertedBooleanValue(SettingsFile.SETTINGS_GFX, SettingsFile.SECTION_GFX_HACKS, SettingsFile.KEY_SKIP_EFB, false); boolean ignoreFormatValue = getInvertedBooleanValue(SettingsFile.SETTINGS_GFX, SettingsFile.SECTION_GFX_HACKS, SettingsFile.KEY_IGNORE_FORMAT, true); int xfbValue = getXfbValue(); BooleanSetting skipEFB = new BooleanSetting(SettingsFile.KEY_SKIP_EFB, SettingsFile.SECTION_GFX_HACKS, SettingsFile.SETTINGS_GFX, skipEFBValue); BooleanSetting ignoreFormat = new BooleanSetting(SettingsFile.KEY_IGNORE_FORMAT, SettingsFile.SECTION_GFX_HACKS, SettingsFile.SETTINGS_GFX, ignoreFormatValue); Setting efbToTexture = mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_HACKS).getSetting(SettingsFile.KEY_EFB_TEXTURE); Setting texCacheAccuracy = mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_SETTINGS).getSetting(SettingsFile.KEY_TEXCACHE_ACCURACY); Setting gpuTextureDecoding = 
mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_SETTINGS).getSetting(SettingsFile.KEY_GPU_TEXTURE_DECODING); IntSetting xfb = new IntSetting(SettingsFile.KEY_XFB, SettingsFile.SECTION_GFX_HACKS, SettingsFile.SETTINGS_GFX, xfbValue); Setting fastDepth = mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_HACKS).getSetting(SettingsFile.KEY_FAST_DEPTH); Setting aspectRatio = mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_SETTINGS).getSetting(SettingsFile.KEY_ASPECT_RATIO); sl.add(new HeaderSetting(null, null, R.string.embedded_frame_buffer, 0)); sl.add(new CheckBoxSetting(SettingsFile.KEY_SKIP_EFB, SettingsFile.SECTION_GFX_HACKS, SettingsFile.SETTINGS_GFX, R.string.skip_efb_access, R.string.skip_efb_access_descrip, false, skipEFB)); sl.add(new CheckBoxSetting(SettingsFile.KEY_IGNORE_FORMAT, SettingsFile.SECTION_GFX_HACKS, SettingsFile.SETTINGS_GFX, R.string.ignore_format_changes, R.string.ignore_format_changes_descrip, true, ignoreFormat)); sl.add(new CheckBoxSetting(SettingsFile.KEY_EFB_TEXTURE, SettingsFile.SECTION_GFX_HACKS, SettingsFile.SETTINGS_GFX, R.string.efb_copy_method, R.string.efb_copy_method_descrip, true, efbToTexture)); sl.add(new HeaderSetting(null, null, R.string.texture_cache, 0)); sl.add(new SingleChoiceSetting(SettingsFile.KEY_TEXCACHE_ACCURACY, SettingsFile.SECTION_GFX_SETTINGS, SettingsFile.SETTINGS_GFX, R.string.texture_cache_accuracy, R.string.texture_cache_accuracy_descrip, R.array.textureCacheAccuracyEntries, R.array.textureCacheAccuracyValues, 128, texCacheAccuracy)); sl.add(new CheckBoxSetting(SettingsFile.KEY_GPU_TEXTURE_DECODING, SettingsFile.SECTION_GFX_SETTINGS, SettingsFile.SETTINGS_GFX, R.string.gpu_texture_decoding, R.string.gpu_texture_decoding_descrip, false, gpuTextureDecoding)); sl.add(new HeaderSetting(null, null, R.string.external_frame_buffer, 0)); sl.add(new SingleChoiceSetting(SettingsFile.KEY_XFB_METHOD, SettingsFile.SECTION_GFX_HACKS, SettingsFile.SETTINGS_GFX, 
R.string.external_frame_buffer, R.string.external_frame_buffer_descrip, R.array.externalFrameBufferEntries, R.array.externalFrameBufferValues, 0, xfb)); sl.add(new HeaderSetting(null, null, R.string.other, 0)); sl.add(new CheckBoxSetting(SettingsFile.KEY_FAST_DEPTH, SettingsFile.SECTION_GFX_HACKS, SettingsFile.SETTINGS_GFX, R.string.fast_depth_calculation, R.string.fast_depth_calculation_descrip, true, fastDepth)); sl.add(new SingleChoiceSetting(SettingsFile.KEY_ASPECT_RATIO, SettingsFile.SECTION_GFX_SETTINGS, SettingsFile.SETTINGS_GFX, R.string.aspect_ratio, R.string.aspect_ratio_descrip, R.array.aspectRatioEntries, R.array.aspectRatioValues, 0, aspectRatio)); } private void addStereoSettings(ArrayList<SettingsItem> sl) { if (mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_STEREOSCOPY) == null) { mSettings.get(SettingsFile.SETTINGS_GFX).put(SettingsFile.SECTION_STEREOSCOPY, new SettingSection(SettingsFile.SECTION_STEREOSCOPY)); } Setting stereoModeValue = mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_STEREOSCOPY).getSetting(SettingsFile.KEY_STEREO_MODE); Setting stereoDepth = mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_STEREOSCOPY).getSetting(SettingsFile.KEY_STEREO_DEPTH); Setting convergence = mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_STEREOSCOPY).getSetting(SettingsFile.KEY_STEREO_CONV); Setting swapEyes = mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_STEREOSCOPY).getSetting(SettingsFile.KEY_STEREO_SWAP); sl.add(new SingleChoiceSetting(SettingsFile.KEY_STEREO_MODE, SettingsFile.SECTION_STEREOSCOPY, SettingsFile.SETTINGS_GFX, R.string.stereoscopy, R.string.stereoscopy_descrip, R.array.stereoscopyEntries, R.array.stereoscopyValues, 0, stereoModeValue)); sl.add(new SliderSetting(SettingsFile.KEY_STEREO_DEPTH, SettingsFile.SECTION_STEREOSCOPY, SettingsFile.SETTINGS_GFX, R.string.sterescopy_depth, R.string.sterescopy_depth_descrip, 100, "%", 20, stereoDepth)); 
sl.add(new SliderSetting(SettingsFile.KEY_STEREO_CONV, SettingsFile.SECTION_STEREOSCOPY, SettingsFile.SETTINGS_GFX, R.string.sterescopy_convergence, R.string.sterescopy_convergence_descrip, 200, "%", 0, convergence)); sl.add(new CheckBoxSetting(SettingsFile.KEY_STEREO_SWAP, SettingsFile.SECTION_STEREOSCOPY, SettingsFile.SETTINGS_GFX, R.string.sterescopy_swap_eyes, R.string.sterescopy_swap_eyes_descrip, false, swapEyes)); } private void addGcPadSubSettings(ArrayList<SettingsItem> sl, int gcPadNumber, int gcPadType) { if (gcPadType == 1) // Emulated { Setting bindA = null; Setting bindB = null; Setting bindX = null; Setting bindY = null; Setting bindZ = null; Setting bindStart = null; Setting bindControlUp = null; Setting bindControlDown = null; Setting bindControlLeft = null; Setting bindControlRight = null; Setting bindCUp = null; Setting bindCDown = null; Setting bindCLeft = null; Setting bindCRight = null; Setting bindTriggerL = null; Setting bindTriggerR = null; Setting bindDPadUp = null; Setting bindDPadDown = null; Setting bindDPadLeft = null; Setting bindDPadRight = null; try { bindA = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_A + gcPadNumber); bindB = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_B + gcPadNumber); bindX = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_X + gcPadNumber); bindY = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_Y + gcPadNumber); bindZ = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_Z + gcPadNumber); bindStart = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_START + gcPadNumber); bindControlUp = 
mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_CONTROL_UP + gcPadNumber); bindControlDown = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_CONTROL_DOWN + gcPadNumber); bindControlLeft = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_CONTROL_LEFT + gcPadNumber); bindControlRight = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_CONTROL_RIGHT + gcPadNumber); bindCUp = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_C_UP + gcPadNumber); bindCDown = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_C_DOWN + gcPadNumber); bindCLeft = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_C_LEFT + gcPadNumber); bindCRight = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_C_RIGHT + gcPadNumber); bindTriggerL = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_TRIGGER_L + gcPadNumber); bindTriggerR = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_TRIGGER_R + gcPadNumber); bindDPadUp = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_DPAD_UP + gcPadNumber); bindDPadDown = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_DPAD_DOWN + gcPadNumber); bindDPadLeft = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_DPAD_LEFT + 
gcPadNumber); bindDPadRight = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_GCBIND_DPAD_RIGHT + gcPadNumber); } catch (NullPointerException ex) { mSettings.get(SettingsFile.SETTINGS_DOLPHIN).put(SettingsFile.SECTION_BINDINGS, new SettingSection(SettingsFile.SECTION_BINDINGS)); mView.passSettingsToActivity(mSettings); } sl.add(new HeaderSetting(null, null, R.string.generic_buttons, 0)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_A + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.button_a, bindA)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_B + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.button_b, bindB)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_X + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.button_x, bindX)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_Y + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.button_y, bindY)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_Z + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.button_z, bindZ)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_START + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.button_start, bindStart)); sl.add(new HeaderSetting(null, null, R.string.controller_control, 0)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_CONTROL_UP + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_up, bindControlUp)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_CONTROL_DOWN + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_down, bindControlDown)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_CONTROL_LEFT + gcPadNumber, SettingsFile.SECTION_BINDINGS, 
SettingsFile.SETTINGS_DOLPHIN, R.string.generic_left, bindControlLeft)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_CONTROL_RIGHT + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_right, bindControlRight)); sl.add(new HeaderSetting(null, null, R.string.controller_c, 0)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_C_UP + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_up, bindCUp)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_C_DOWN + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_down, bindCDown)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_C_LEFT + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_left, bindCLeft)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_C_RIGHT + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_right, bindCRight)); sl.add(new HeaderSetting(null, null, R.string.controller_trig, 0)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_TRIGGER_L + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.trigger_left, bindTriggerL)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_TRIGGER_R + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.trigger_right, bindTriggerR)); sl.add(new HeaderSetting(null, null, R.string.controller_dpad, 0)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_DPAD_UP + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_up, bindDPadUp)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_DPAD_DOWN + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_down, bindDPadDown)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_DPAD_LEFT + gcPadNumber, SettingsFile.SECTION_BINDINGS, 
SettingsFile.SETTINGS_DOLPHIN, R.string.generic_left, bindDPadLeft)); sl.add(new InputBindingSetting(SettingsFile.KEY_GCBIND_DPAD_RIGHT + gcPadNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_right, bindDPadRight)); } else // Adapter { Setting rumble = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_CORE).getSetting(SettingsFile.KEY_GCADAPTER_RUMBLE + gcPadNumber); Setting bongos = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_CORE).getSetting(SettingsFile.KEY_GCADAPTER_BONGOS + gcPadNumber); sl.add(new CheckBoxSetting(SettingsFile.KEY_GCADAPTER_RUMBLE + gcPadNumber, SettingsFile.SECTION_CORE, SettingsFile.SETTINGS_DOLPHIN, R.string.gc_adapter_rumble, R.string.gc_adapter_rumble_description, false, rumble)); sl.add(new CheckBoxSetting(SettingsFile.KEY_GCADAPTER_BONGOS + gcPadNumber, SettingsFile.SECTION_CORE, SettingsFile.SETTINGS_DOLPHIN, R.string.gc_adapter_bongos, R.string.gc_adapter_bongos_description, false, bongos)); } } private void addWiimoteSubSettings(ArrayList<SettingsItem> sl, int wiimoteNumber) { IntSetting extension = null; Setting bindA = null; Setting bindB = null; Setting bind1 = null; Setting bind2 = null; Setting bindMinus = null; Setting bindPlus = null; Setting bindHome = null; Setting bindIRUp = null; Setting bindIRDown = null; Setting bindIRLeft = null; Setting bindIRRight = null; Setting bindIRForward = null; Setting bindIRBackward = null; Setting bindIRHide = null; Setting bindSwingUp = null; Setting bindSwingDown = null; Setting bindSwingLeft = null; Setting bindSwingRight = null; Setting bindSwingForward = null; Setting bindSwingBackward = null; Setting bindTiltForward = null; Setting bindTiltBackward = null; Setting bindTiltLeft = null; Setting bindTiltRight = null; Setting bindTiltModifier = null; Setting bindShakeX = null; Setting bindShakeY = null; Setting bindShakeZ = null; Setting bindDPadUp = null; Setting bindDPadDown = null; Setting bindDPadLeft = 
null; Setting bindDPadRight = null; try { // Bindings use controller numbers 4-7 (0-3 are GameCube), but the extension setting uses 1-4. extension = new IntSetting(SettingsFile.KEY_WIIMOTE_EXTENSION, SettingsFile.SECTION_WIIMOTE + (wiimoteNumber - 3), SettingsFile.SETTINGS_WIIMOTE, getExtensionValue(wiimoteNumber - 3)); bindA = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_A + wiimoteNumber); bindB = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_B + wiimoteNumber); bind1 = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_1 + wiimoteNumber); bind2 = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_2 + wiimoteNumber); bindMinus = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_MINUS + wiimoteNumber); bindPlus = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_PLUS + wiimoteNumber); bindHome = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_HOME + wiimoteNumber); bindIRUp = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_IR_UP + wiimoteNumber); bindIRDown = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_IR_DOWN + wiimoteNumber); bindIRLeft = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_IR_LEFT + wiimoteNumber); bindIRRight = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_IR_RIGHT + wiimoteNumber); bindIRForward = 
mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_IR_FORWARD + wiimoteNumber); bindIRBackward = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_IR_BACKWARD + wiimoteNumber); bindIRHide = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_IR_HIDE + wiimoteNumber); bindSwingUp = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_SWING_UP + wiimoteNumber); bindSwingDown = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_SWING_DOWN + wiimoteNumber); bindSwingLeft = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_SWING_LEFT + wiimoteNumber); bindSwingRight = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_SWING_RIGHT + wiimoteNumber); bindSwingForward = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_SWING_FORWARD + wiimoteNumber); bindSwingBackward = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_SWING_BACKWARD + wiimoteNumber); bindTiltForward = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_TILT_FORWARD + wiimoteNumber); bindTiltBackward = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_TILT_BACKWARD + wiimoteNumber); bindTiltLeft = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_TILT_LEFT + wiimoteNumber); bindTiltRight = 
mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_TILT_RIGHT + wiimoteNumber); bindTiltModifier = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_TILT_MODIFIER + wiimoteNumber); bindShakeX = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_SHAKE_X + wiimoteNumber); bindShakeY = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_SHAKE_Y + wiimoteNumber); bindShakeZ = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_SHAKE_Z + wiimoteNumber); bindDPadUp = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_DPAD_UP + wiimoteNumber); bindDPadDown = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_DPAD_DOWN + wiimoteNumber); bindDPadLeft = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_DPAD_LEFT + wiimoteNumber); bindDPadRight = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(SettingsFile.KEY_WIIBIND_DPAD_RIGHT + wiimoteNumber); } catch (NullPointerException ex) { mSettings.get(SettingsFile.SETTINGS_DOLPHIN).put(SettingsFile.SECTION_BINDINGS, new SettingSection(SettingsFile.SECTION_BINDINGS)); mView.passSettingsToActivity(mSettings); } sl.add(new SingleChoiceSetting(SettingsFile.KEY_WIIMOTE_EXTENSION, SettingsFile.SECTION_WIIMOTE + (wiimoteNumber - 3), SettingsFile.SETTINGS_WIIMOTE, R.string.wiimote_extensions, R.string.wiimote_extensions_descrip, R.array.wiimoteExtensionsEntries, R.array.wiimoteExtensionsValues, 0, extension)); sl.add(new HeaderSetting(null, null, R.string.generic_buttons, 0)); sl.add(new 
InputBindingSetting(SettingsFile.KEY_WIIBIND_A + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.button_a, bindA)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_B + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.button_b, bindB)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_1 + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.button_one, bind1)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_2 + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.button_two, bind2)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_MINUS + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.button_minus, bindMinus)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_PLUS + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.button_plus, bindPlus)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_HOME + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.button_home, bindHome)); sl.add(new HeaderSetting(null, null, R.string.wiimote_ir, 0)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_IR_UP + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_up, bindIRUp)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_IR_DOWN + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_down, bindIRDown)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_IR_LEFT + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_left, bindIRLeft)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_IR_RIGHT + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_right, bindIRRight)); sl.add(new 
InputBindingSetting(SettingsFile.KEY_WIIBIND_IR_FORWARD + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_forward, bindIRForward)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_IR_BACKWARD + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_backward, bindIRBackward)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_IR_HIDE + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.ir_hide, bindIRHide)); sl.add(new HeaderSetting(null, null, R.string.wiimote_swing, 0)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_SWING_UP + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_up, bindSwingUp)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_SWING_DOWN + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_down, bindSwingDown)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_SWING_LEFT + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_left, bindSwingLeft)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_SWING_RIGHT + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_right, bindSwingRight)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_SWING_FORWARD + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_forward, bindSwingForward)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_SWING_BACKWARD + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_backward, bindSwingBackward)); sl.add(new HeaderSetting(null, null, R.string.wiimote_tilt, 0)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_TILT_FORWARD + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_forward, 
bindTiltForward)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_TILT_BACKWARD + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_backward, bindTiltBackward)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_TILT_LEFT + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_left, bindTiltLeft)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_TILT_RIGHT + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_right, bindTiltRight)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_TILT_MODIFIER + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.tilt_modifier, bindTiltModifier)); sl.add(new HeaderSetting(null, null, R.string.wiimote_shake, 0)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_SHAKE_X + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.shake_x, bindShakeX)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_SHAKE_Y + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.shake_y, bindShakeY)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_SHAKE_Z + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.shake_z, bindShakeZ)); sl.add(new HeaderSetting(null, null, R.string.controller_dpad, 0)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_DPAD_UP + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_up, bindDPadUp)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_DPAD_DOWN + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_down, bindDPadDown)); sl.add(new InputBindingSetting(SettingsFile.KEY_WIIBIND_DPAD_LEFT + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_left, bindDPadLeft)); sl.add(new 
InputBindingSetting(SettingsFile.KEY_WIIBIND_DPAD_RIGHT + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, R.string.generic_right, bindDPadRight));
    }

    /**
     * Looks up the current binding Setting for a per-wiimote key in the Dolphin
     * bindings section and appends a matching InputBindingSetting row to sl.
     *
     * Collapses the lookup+add pattern that was previously repeated inline for
     * every binding, and guarantees that each row's key and Setting always agree.
     *
     * @param sl            list of SettingsItems the new row is appended to
     * @param key           base INI key; the wiimote number is appended to it
     * @param wiimoteNumber which wiimote this binding belongs to
     * @param titleId       string resource id used as the row's title
     */
    private void addWiimoteBinding(ArrayList<SettingsItem> sl, String key, int wiimoteNumber, int titleId)
    {
        Setting setting = mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_BINDINGS).getSetting(key + wiimoteNumber);
        sl.add(new InputBindingSetting(key + wiimoteNumber, SettingsFile.SECTION_BINDINGS, SettingsFile.SETTINGS_DOLPHIN, titleId, setting));
    }

    /**
     * Appends the binding rows for the selected wiimote extension to sl.
     *
     * An unrecognized extension type adds nothing (matches the original
     * fall-through behavior of the switch).
     *
     * @param sl            list of SettingsItems the rows are appended to
     * @param wiimoteNumber which wiimote the extension is attached to
     * @param extentionType 1 = Nunchuk, 2 = Classic, 3 = Guitar, 4 = Drums,
     *                      5 = Turntable (name kept as-is; "extension" is
     *                      misspelled in the existing call sites' signature)
     */
    private void addExtensionTypeSettings(ArrayList<SettingsItem> sl, int wiimoteNumber, int extentionType)
    {
        switch (extentionType)
        {
            case 1: // Nunchuk
                sl.add(new HeaderSetting(null, null, R.string.generic_buttons, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_C, wiimoteNumber, R.string.nunchuk_button_c);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_Z, wiimoteNumber, R.string.button_z);
                sl.add(new HeaderSetting(null, null, R.string.generic_stick, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_UP, wiimoteNumber, R.string.generic_up);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_DOWN, wiimoteNumber, R.string.generic_down);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_LEFT, wiimoteNumber, R.string.generic_left);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_RIGHT, wiimoteNumber, R.string.generic_right);
                sl.add(new HeaderSetting(null, null, R.string.wiimote_swing, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_SWING_UP, wiimoteNumber, R.string.generic_up);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_SWING_DOWN, wiimoteNumber, R.string.generic_down);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_SWING_LEFT, wiimoteNumber, R.string.generic_left);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_SWING_RIGHT, wiimoteNumber, R.string.generic_right);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_SWING_FORWARD, wiimoteNumber, R.string.generic_forward);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_SWING_BACKWARD, wiimoteNumber, R.string.generic_backward);
                sl.add(new HeaderSetting(null, null, R.string.wiimote_tilt, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_TILT_FORWARD, wiimoteNumber, R.string.generic_forward);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_TILT_BACKWARD, wiimoteNumber, R.string.generic_backward);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_TILT_LEFT, wiimoteNumber, R.string.generic_left);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_TILT_RIGHT, wiimoteNumber, R.string.generic_right);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_TILT_MODIFIER, wiimoteNumber, R.string.tilt_modifier);
                sl.add(new HeaderSetting(null, null, R.string.wiimote_shake, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_SHAKE_X, wiimoteNumber, R.string.shake_x);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_SHAKE_Y, wiimoteNumber, R.string.shake_y);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_NUNCHUK_SHAKE_Z, wiimoteNumber, R.string.shake_z);
                break;

            case 2: // Classic
                sl.add(new HeaderSetting(null, null, R.string.generic_buttons, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_A, wiimoteNumber, R.string.button_a);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_B, wiimoteNumber, R.string.button_b);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_X, wiimoteNumber, R.string.button_x);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_Y, wiimoteNumber, R.string.button_y);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_ZL, wiimoteNumber, R.string.classic_button_zl);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_ZR, wiimoteNumber, R.string.classic_button_zr);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_MINUS, wiimoteNumber, R.string.button_minus);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_PLUS, wiimoteNumber, R.string.button_plus);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_HOME, wiimoteNumber, R.string.button_home);
                sl.add(new HeaderSetting(null, null, R.string.classic_leftstick, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_LEFT_UP, wiimoteNumber, R.string.generic_up);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_LEFT_DOWN, wiimoteNumber, R.string.generic_down);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_LEFT_LEFT, wiimoteNumber, R.string.generic_left);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_LEFT_RIGHT, wiimoteNumber, R.string.generic_right);
                sl.add(new HeaderSetting(null, null, R.string.classic_rightstick, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_RIGHT_UP, wiimoteNumber, R.string.generic_up);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_RIGHT_DOWN, wiimoteNumber, R.string.generic_down);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_RIGHT_LEFT, wiimoteNumber, R.string.generic_left);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_RIGHT_RIGHT, wiimoteNumber, R.string.generic_right);
                sl.add(new HeaderSetting(null, null, R.string.controller_trig, 0));
                // BUGFIX: the two trigger rows previously crossed their Settings
                // (the TRIGGER_L key was paired with the TRIGGER_R binding value
                // and vice versa). The helper pairs each key with its own Setting.
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_TRIGGER_L, wiimoteNumber, R.string.trigger_left);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_TRIGGER_R, wiimoteNumber, R.string.trigger_right);
                sl.add(new HeaderSetting(null, null, R.string.controller_dpad, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_DPAD_UP, wiimoteNumber, R.string.generic_up);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_DPAD_DOWN, wiimoteNumber, R.string.generic_down);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_DPAD_LEFT, wiimoteNumber, R.string.generic_left);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_CLASSIC_DPAD_RIGHT, wiimoteNumber, R.string.generic_right);
                break;

            case 3: // Guitar
                sl.add(new HeaderSetting(null, null, R.string.guitar_frets, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_GUITAR_FRET_GREEN, wiimoteNumber, R.string.generic_green);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_GUITAR_FRET_RED, wiimoteNumber, R.string.generic_red);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_GUITAR_FRET_YELLOW, wiimoteNumber, R.string.generic_yellow);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_GUITAR_FRET_BLUE, wiimoteNumber, R.string.generic_blue);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_GUITAR_FRET_ORANGE, wiimoteNumber, R.string.generic_orange);
                sl.add(new HeaderSetting(null, null, R.string.guitar_strum, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_GUITAR_STRUM_UP, wiimoteNumber, R.string.generic_up);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_GUITAR_STRUM_DOWN, wiimoteNumber, R.string.generic_down);
                sl.add(new HeaderSetting(null, null, R.string.generic_buttons, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_GUITAR_MINUS, wiimoteNumber, R.string.button_minus);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_GUITAR_PLUS, wiimoteNumber, R.string.button_plus);
                sl.add(new HeaderSetting(null, null, R.string.generic_stick, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_GUITAR_STICK_UP, wiimoteNumber, R.string.generic_up);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_GUITAR_STICK_DOWN, wiimoteNumber, R.string.generic_down);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_GUITAR_STICK_LEFT, wiimoteNumber, R.string.generic_left);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_GUITAR_STICK_RIGHT, wiimoteNumber, R.string.generic_right);
                sl.add(new HeaderSetting(null, null, R.string.guitar_whammy, 0));
                // NOTE(review): the whammy-bar row reuses R.string.generic_right as its
                // label, exactly as the original code did — confirm this is intentional.
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_GUITAR_WHAMMY_BAR, wiimoteNumber, R.string.generic_right);
                break;

            case 4: // Drums
                sl.add(new HeaderSetting(null, null, R.string.drums_pads, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_DRUMS_PAD_RED, wiimoteNumber, R.string.generic_red);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_DRUMS_PAD_YELLOW, wiimoteNumber, R.string.generic_yellow);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_DRUMS_PAD_BLUE, wiimoteNumber, R.string.generic_blue);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_DRUMS_PAD_GREEN, wiimoteNumber, R.string.generic_green);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_DRUMS_PAD_ORANGE, wiimoteNumber, R.string.generic_orange);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_DRUMS_PAD_BASS, wiimoteNumber, R.string.drums_pad_bass);
                sl.add(new HeaderSetting(null, null, R.string.generic_stick, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_DRUMS_STICK_UP, wiimoteNumber, R.string.generic_up);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_DRUMS_STICK_DOWN, wiimoteNumber, R.string.generic_down);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_DRUMS_STICK_LEFT, wiimoteNumber, R.string.generic_left);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_DRUMS_STICK_RIGHT, wiimoteNumber, R.string.generic_right);
                sl.add(new HeaderSetting(null, null, R.string.generic_buttons, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_DRUMS_MINUS, wiimoteNumber, R.string.button_minus);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_DRUMS_PLUS, wiimoteNumber, R.string.button_plus);
                break;

            case 5: // Turntable
                sl.add(new HeaderSetting(null, null, R.string.generic_buttons, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_GREEN_LEFT, wiimoteNumber, R.string.turntable_button_green_left);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_RED_LEFT, wiimoteNumber, R.string.turntable_button_red_left);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_BLUE_LEFT, wiimoteNumber, R.string.turntable_button_blue_left);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_GREEN_RIGHT, wiimoteNumber, R.string.turntable_button_green_right);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_RED_RIGHT, wiimoteNumber, R.string.turntable_button_red_right);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_BLUE_RIGHT, wiimoteNumber, R.string.turntable_button_blue_right);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_MINUS, wiimoteNumber, R.string.button_minus);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_PLUS, wiimoteNumber, R.string.button_plus);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_EUPHORIA, wiimoteNumber, R.string.turntable_button_euphoria);
                sl.add(new HeaderSetting(null, null, R.string.turntable_table_left, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_LEFT_LEFT, wiimoteNumber, R.string.generic_left);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_LEFT_RIGHT, wiimoteNumber, R.string.generic_right);
                sl.add(new HeaderSetting(null, null, R.string.turntable_table_right, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_RIGHT_LEFT, wiimoteNumber, R.string.generic_left);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_RIGHT_RIGHT, wiimoteNumber, R.string.generic_right);
                sl.add(new HeaderSetting(null, null, R.string.generic_stick, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_STICK_UP, wiimoteNumber, R.string.generic_up);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_STICK_DOWN, wiimoteNumber, R.string.generic_down);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_STICK_LEFT, wiimoteNumber, R.string.generic_left);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_STICK_RIGHT, wiimoteNumber, R.string.generic_right);
                sl.add(new HeaderSetting(null, null, R.string.turntable_effect, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_EFFECT_DIAL, wiimoteNumber, R.string.turntable_effect_dial);
                sl.add(new HeaderSetting(null, null, R.string.turntable_crossfade, 0));
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_CROSSFADE_LEFT, wiimoteNumber, R.string.generic_left);
                addWiimoteBinding(sl, SettingsFile.KEY_WIIBIND_TURNTABLE_CROSSFADE_RIGHT, wiimoteNumber, R.string.generic_right);
                break;
        }
    }

    /**
     * Reads a BooleanSetting and returns its logical inverse.
     *
     * @param file         which settings file to read from
     * @param section      INI section the key lives in
     * @param key          the setting's key
     * @param defaultValue returned when the setting is absent (the lookup chain
     *                     throws NullPointerException for a missing entry)
     * @return the negated stored value, or defaultValue if the setting is missing
     */
    private boolean getInvertedBooleanValue(int file, String section, String key, boolean defaultValue)
    {
        try
        {
            return !((BooleanSetting) mSettings.get(file).get(section).getSetting(key)).getValue();
        }
        catch (NullPointerException ex)
        {
            return defaultValue;
        }
    }

    /**
     * Maps the stored video backend name to the index used by the backend picker.
     *
     * @return 0 for "OGL" (also the fallback for unknown or missing values),
     *         1 for "Vulkan", 2 for "Software Renderer", 3 for "Null"
     */
    private int getVideoBackendValue()
    {
        int videoBackendValue;
        try
        {
            String videoBackend = ((StringSetting) mSettings.get(SettingsFile.SETTINGS_DOLPHIN).get(SettingsFile.SECTION_CORE).getSetting(SettingsFile.KEY_VIDEO_BACKEND)).getValue();
            switch (videoBackend)
            {
                case "OGL":
                    videoBackendValue = 0;
                    break;
                case "Vulkan":
                    videoBackendValue = 1;
                    break;
                case "Software Renderer":
                    videoBackendValue = 2;
                    break;
                case "Null":
                    videoBackendValue = 3;
                    break;
                default:
                    // Unknown backend string: fall back to OpenGL, as before.
                    videoBackendValue = 0;
                    break;
            }
        }
        catch (NullPointerException ex)
        {
            // Setting missing entirely: default to OpenGL.
            videoBackendValue = 0;
        }
        return videoBackendValue;
    }

    private int getXfbValue() { int
xfbValue; try { boolean usingXFB = ((BooleanSetting) mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_SETTINGS).getSetting(SettingsFile.KEY_XFB)).getValue(); boolean usingRealXFB = ((BooleanSetting) mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_SETTINGS).getSetting(SettingsFile.KEY_XFB_REAL)).getValue(); if (!usingXFB) { xfbValue = 0; } else if (!usingRealXFB) { xfbValue = 1; } else { xfbValue = 2; } } catch (NullPointerException ex) { xfbValue = 0; } return xfbValue; } private int getUberShaderModeValue() { int uberShaderModeValue = 0; try { boolean backgroundShaderCompiling = ((BooleanSetting) mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_SETTINGS).getSetting(SettingsFile.KEY_BACKGROUND_SHADER_COMPILING)).getValue(); boolean disableSpecializedShaders = ((BooleanSetting) mSettings.get(SettingsFile.SETTINGS_GFX).get(SettingsFile.SECTION_GFX_SETTINGS).getSetting(SettingsFile.KEY_DISABLE_SPECIALIZED_SHADERS)).getValue(); if (disableSpecializedShaders) uberShaderModeValue = 2; // Exclusive else if (backgroundShaderCompiling) uberShaderModeValue = 1; // Hybrid else uberShaderModeValue = 0; // Disabled } catch (NullPointerException ex) { } return uberShaderModeValue; } private int getExtensionValue(int wiimoteNumber) { int extensionValue; try { String extension = ((StringSetting)mSettings.get(SettingsFile.SETTINGS_WIIMOTE).get(SettingsFile.SECTION_WIIMOTE + wiimoteNumber).getSetting(SettingsFile.KEY_WIIMOTE_EXTENSION)).getValue(); if (extension.equals("None")) { extensionValue = 0; } else if (extension.equals("Nunchuk")) { extensionValue = 1; } else if (extension.equals("Classic")) { extensionValue = 2; } else if (extension.equals("Guitar")) { extensionValue = 3; } else if (extension.equals("Drums")) { extensionValue = 4; } else if (extension.equals("Turntable")) { extensionValue = 5; } else { extensionValue = 0; } } catch (NullPointerException ex) { extensionValue = 0; } return extensionValue; } }
linkmauve/dolphin
Source/Android/app/src/main/java/org/dolphinemu/dolphinemu/ui/settings/SettingsFragmentPresenter.java
Java
gpl-2.0
81,693
#ifndef _FILE_UTIL_H_INCLUDED_ #define _FILE_UTIL_H_INCLUDED_ #include <windows.h> inline BOOL IsFileExist(LPCTSTR path) { return GetFileAttributes(path) != -1; } #endif
closer76/pcman-windows
Lite/FileUtil.h
C
gpl-2.0
185
/* <!-- copyright */ /* * aria2 - The high speed download utility * * Copyright (C) 2010 Tatsuhiro Tsujikawa * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * * In addition, as a special exception, the copyright holders give * permission to link the code of portions of this program with the * OpenSSL library under certain conditions as described in each * individual source file, and distribute linked combinations * including the two. * You must obey the GNU General Public License in all respects * for all of the code used other than OpenSSL. If you modify * file(s) with this exception, you may extend this exception to your * version of the file(s), but you are not obligated to do so. If you * do not wish to do so, delete this exception statement from your * version. If you delete this exception statement from all source * files in the program, then also delete it here. 
*/ /* copyright --> */
#include "SocketBuffer.h"

#include <cassert>
#include <algorithm>

#include "SocketCore.h"
#include "DlAbortEx.h"
#include "message.h"
#include "fmt.h"
#include "LogFactory.h"
#include "a2functional.h"

namespace aria2 {

// Buffer entry backed by an owned byte vector.  Takes ownership of both
// the bytes and the optional ProgressUpdate callback (moved in).
SocketBuffer::ByteArrayBufEntry::ByteArrayBufEntry(
    std::vector<unsigned char> bytes,
    std::unique_ptr<ProgressUpdate> progressUpdate)
    : BufEntry(std::move(progressUpdate)), bytes_(std::move(bytes))
{
}

SocketBuffer::ByteArrayBufEntry::~ByteArrayBufEntry() = default;

// Writes the unsent remainder of this entry (everything past `offset`)
// to the socket; returns whatever SocketCore::writeData() reports.
ssize_t
SocketBuffer::ByteArrayBufEntry::send(const std::shared_ptr<SocketCore>& socket,
                                      size_t offset)
{
  return socket->writeData(bytes_.data() + offset, bytes_.size() - offset);
}

// True once `offset` has reached (or passed) the end of the entry, i.e.
// the entry has been fully transmitted.
bool SocketBuffer::ByteArrayBufEntry::final(size_t offset) const
{
  return bytes_.size() <= offset;
}

size_t SocketBuffer::ByteArrayBufEntry::getLength() const
{
  return bytes_.size();
}

const unsigned char* SocketBuffer::ByteArrayBufEntry::getData() const
{
  return bytes_.data();
}

// Buffer entry backed by an owned std::string (moved in).
SocketBuffer::StringBufEntry::StringBufEntry(
    std::string s, std::unique_ptr<ProgressUpdate> progressUpdate)
    : BufEntry(std::move(progressUpdate)), str_(std::move(s))
{
}

// Writes the unsent remainder of the string (past `offset`) to the socket.
ssize_t
SocketBuffer::StringBufEntry::send(const std::shared_ptr<SocketCore>& socket,
                                   size_t offset)
{
  return socket->writeData(str_.data() + offset, str_.size() - offset);
}

// True once the whole string has been transmitted.
bool SocketBuffer::StringBufEntry::final(size_t offset) const
{
  return str_.size() <= offset;
}

size_t SocketBuffer::StringBufEntry::getLength() const { return str_.size(); }

const unsigned char* SocketBuffer::StringBufEntry::getData() const
{
  return reinterpret_cast<const unsigned char*>(str_.c_str());
}

// offset_ tracks how many bytes of the FRONT queue entry have already
// been written; entries behind the front are always wholly unsent.
SocketBuffer::SocketBuffer(std::shared_ptr<SocketCore> socket)
    : socket_(std::move(socket)), offset_(0)
{
}

SocketBuffer::~SocketBuffer() = default;

// Queues a byte vector for sending.  Empty input is silently dropped so
// the queue never holds zero-length entries (send() assumes non-empty).
void SocketBuffer::pushBytes(std::vector<unsigned char> bytes,
                             std::unique_ptr<ProgressUpdate> progressUpdate)
{
  if (!bytes.empty()) {
    bufq_.push_back(make_unique<ByteArrayBufEntry>(std::move(bytes),
                                                   std::move(progressUpdate)));
  }
}

// Queues a string for sending; empty strings are dropped, as above.
void SocketBuffer::pushStr(std::string data,
                           std::unique_ptr<ProgressUpdate> progressUpdate)
{
  if (!data.empty()) {
    bufq_.push_back(make_unique<StringBufEntry>(std::move(data),
                                                std::move(progressUpdate)));
  }
}

// Flushes as much of the queue as the socket will accept, batching
// consecutive entries into a single scatter-gather writeVector() call.
// Returns the total number of bytes written in this invocation.
ssize_t SocketBuffer::send()
{
  a2iovec iov[A2_IOV_MAX];
  size_t totalslen = 0;
  while (!bufq_.empty()) {
    size_t num;
    size_t bufqlen = bufq_.size();
    // Cap each writeVector() batch at roughly 24KiB (24_k is an aria2
    // user-defined literal); the front entry always goes in, even if it
    // alone exceeds the cap.
    ssize_t amount = 24_k;
    ssize_t firstlen = bufq_.front()->getLength() - offset_;
    amount -= firstlen;
    // Slot 0: the unsent tail of the front entry, starting at offset_.
    iov[0].A2IOVEC_BASE = reinterpret_cast<char*>(
        const_cast<unsigned char*>(bufq_.front()->getData() + offset_));
    iov[0].A2IOVEC_LEN = firstlen;
    num = 1;
    // Pack following WHOLE entries into the remaining iov slots while
    // they fit under the byte cap, the iov limit, and the queue length.
    for (auto i = std::begin(bufq_) + 1, eoi = std::end(bufq_);
         i != eoi && num < A2_IOV_MAX && num < bufqlen && amount > 0;
         ++i, ++num) {
      ssize_t len = (*i)->getLength();
      if (amount < len) {
        break;
      }
      amount -= len;
      iov[num].A2IOVEC_BASE =
          reinterpret_cast<char*>(const_cast<unsigned char*>((*i)->getData()));
      iov[num].A2IOVEC_LEN = len;
    }
    ssize_t slen = socket_->writeVector(iov, num);
    // Zero bytes written with no pending read/write interest means the
    // peer closed the connection.
    if (slen == 0 && !socket_->wantRead() && !socket_->wantWrite()) {
      throw DL_ABORT_EX(fmt(EX_SOCKET_SEND, "Connection closed."));
    }
    // A2_LOG_NOTICE(fmt("num=%zu, amount=%d, bufq.size()=%zu, SEND=%d",
    //                   num, amount, bufq_.size(), slen));
    totalslen += slen;
    if (firstlen > slen) {
      // Partial write that did not finish even the front entry: remember
      // how far we got, report progress, and stop if the socket would
      // block; otherwise retry the loop.
      offset_ += slen;
      bufq_.front()->progressUpdate(slen, false);
      if (socket_->wantRead() || socket_->wantWrite()) {
        goto fin;
      }
      continue;
    }
    // The front entry was fully written; pop it and walk the remaining
    // batched entries, accounting slen against each in turn.
    slen -= firstlen;
    bufq_.front()->progressUpdate(firstlen, true);
    bufq_.pop_front();
    offset_ = 0;
    for (size_t i = 1; i < num; ++i) {
      auto& buf = bufq_.front();
      ssize_t len = buf->getLength();
      if (len > slen) {
        // This entry was only partially written; record the partial
        // offset and stop flushing for now.
        offset_ = slen;
        bufq_.front()->progressUpdate(slen, false);
        goto fin;
      }
      slen -= len;
      bufq_.front()->progressUpdate(len, true);
      bufq_.pop_front();
    }
  }
fin:
  return totalslen;
}

// True when nothing remains queued for transmission.
bool SocketBuffer::sendBufferIsEmpty() const { return bufq_.empty(); }

} // namespace aria2
tatsuhiro-t/aria2
src/SocketBuffer.cc
C++
gpl-2.0
6,086
<?php

/**
 * Alters the theme-settings form: adds the "Color Settings" preset UI.
 *
 * Builds a collapsible section letting the administrator choose a default
 * color preset and edit the individual preset properties (preset name plus
 * base/link/text/heading colors).  The preset data is exported to
 * drupalSettings and the Farbtastic-based picker library is attached.
 *
 * Fixes: removed the dead commented-out #attached block, dropped the
 * unused foreach key, and corrected two UI strings ("Headding color" ->
 * "Heading color"; "set this preset is default" -> "as the default").
 *
 * @param array $form
 *   The theme settings form array, altered by reference.
 */
function innovation_preset_settings_form_alter(&$form) {
  $theme = innovation_get_theme();
  $presets = $theme->presets;

  // Fall back to a single built-in preset when the theme has none saved.
  if (empty($presets)) {
    $presets = array(
      'innovation_presets' => array(
        array(
          'key' => 'preset1',
          'base_color' => '#0072b9',
          'text_color' => '#494949',
          'link_color' => '#027ac6',
          'link_hover_color' => '#027ac6',
          'heading_color' => '#2385c2',
        ),
      ),
    );
  }
  else {
    $presets = array(
      'innovation_presets' => $presets,
    );
  }

  // Build the select options from the preset keys.  Presets may be stored
  // as objects, so cast each one to an array first.
  $preset_options = array();
  foreach ($presets['innovation_presets'] as $p) {
    $p = (array) $p;
    $preset_options[] = $p['key'];
  }

  $form['preset_settings'] = array(
    '#type' => 'details',
    '#title' => t('Color Settings'),
    '#group' => 'innovation_theme_settings',
    '#weight' => 0,
  );
  // Hidden field that round-trips the serialized presets on save.
  $form['preset_settings']['innovation_presets'] = array(
    '#type' => 'hidden',
    '#default_value' => theme_get_setting('innovation_presets'),
  );
  $form['preset_settings']['innovation_default_preset'] = array(
    '#type' => 'select',
    '#title' => t('Default preset'),
    '#options' => $preset_options,
    '#default_value' => theme_get_setting('innovation_default_preset'),
    '#description' => t('Choose and save to set this preset as the default'),
  );
  $form['preset_settings']['innovation_presets_settings'] = array(
    '#type' => 'fieldset',
    '#title' => t('Preset settings'),
    '#collapsible' => TRUE,
    '#collapsed' => TRUE,
  );
  $form['preset_settings']['innovation_presets_settings']['innovation_presets_list'] = array(
    '#type' => 'select',
    '#title' => t('Presets'),
    '#default_value' => $theme->preset,
    '#options' => $preset_options,
  );

  // The editor fields are seeded from the first preset; the attached JS
  // (data-property attributes + drupalSettings) swaps values client-side
  // when another preset is selected.
  $default_preset = (array) $presets['innovation_presets'][0];
  $form['preset_settings']['innovation_presets_settings']['innovation_preset_key'] = array(
    '#type' => 'textfield',
    '#title' => t('Preset name'),
    '#default_value' => $default_preset['key'],
    '#description' => 'The css file generated based on this name. e.g: style-[preset_name].css',
    '#attributes' => array('data-property' => 'key', 'class' => array('preset-option')),
  );
  $form['preset_settings']['innovation_presets_settings']['innovation_base_color'] = array(
    '#type' => 'textfield',
    '#title' => t('Base color'),
    '#default_value' => $default_preset['base_color'],
    '#attributes' => array('data-property' => 'base_color', 'class' => array('color', 'preset-option')),
  );
  // Older presets may predate base_color_opposite; fall back to base_color.
  $form['preset_settings']['innovation_presets_settings']['innovation_base_color_opposite'] = array(
    '#type' => 'textfield',
    '#title' => t('Opposite Base color'),
    '#default_value' => isset($default_preset['base_color_opposite']) ? $default_preset['base_color_opposite'] : $default_preset['base_color'],
    '#attributes' => array('data-property' => 'base_color_opposite', 'class' => array('color', 'preset-option')),
  );
  $form['preset_settings']['innovation_presets_settings']['innovation_link_color'] = array(
    '#type' => 'textfield',
    '#title' => t('Link color'),
    '#default_value' => $default_preset['link_color'],
    '#attributes' => array('data-property' => 'link_color', 'class' => array('color', 'preset-option')),
  );
  $form['preset_settings']['innovation_presets_settings']['innovation_link_hover_color'] = array(
    '#type' => 'textfield',
    '#title' => t('Link hover color'),
    '#default_value' => $default_preset['link_hover_color'],
    '#attributes' => array('data-property' => 'link_hover_color', 'class' => array('color', 'preset-option')),
  );
  $form['preset_settings']['innovation_presets_settings']['innovation_text_color'] = array(
    '#type' => 'textfield',
    '#title' => t('Text color'),
    '#default_value' => $default_preset['text_color'],
    '#attributes' => array('data-property' => 'text_color', 'class' => array('color', 'preset-option')),
  );
  $form['preset_settings']['innovation_presets_settings']['innovation_heading_color'] = array(
    '#type' => 'textfield',
    '#title' => t('Heading color'),
    '#default_value' => $default_preset['heading_color'],
    '#attributes' => array('data-property' => 'heading_color', 'class' => array('color', 'preset-option')),
  );
  // Placeholder div the Farbtastic color picker renders into.
  $form['preset_settings']['innovation_presets_settings']['innovation_link_picker'] = array(
    '#markup' => '<div id="placeholder"></div>',
  );

  // Expose the presets to JS and attach the picker behaviors.
  $form['#attached']['drupalSettings'] = array_merge($form['#attached']['drupalSettings'], $presets);
  $form['#attached']['library'][] = 'innovation/innovation-farbtastic';
}
nearlyheadlessarvie/bloomingline
themes/innovation/includes/preset_settings.php
PHP
gpl-2.0
4,491
/** * This file is part of Aion-Lightning <aion-lightning.org>. * * Aion-Lightning is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * Aion-Lightning is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * You should have received a copy of the GNU General Public License * along with Aion-Lightning. * If not, see <http://www.gnu.org/licenses/>. * * * Credits goes to all Open Source Core Developer Groups listed below * Please do not change here something, ragarding the developer credits, except the "developed by XXXX". * Even if you edit a lot of files in this source, you still have no rights to call it as "your Core". * Everybody knows that this Emulator Core was developed by Aion Lightning * @-Aion-Unique- * @-Aion-Lightning * @Aion-Engine * @Aion-Extreme * @Aion-NextGen * @Aion-Core Dev. 
*/ package com.aionemu.gameserver.skillengine.effect; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlType; import com.aionemu.gameserver.geoEngine.collision.CollisionIntention; import com.aionemu.gameserver.geoEngine.math.Vector3f; import com.aionemu.gameserver.model.gameobjects.Creature; import com.aionemu.gameserver.model.gameobjects.player.Player; import com.aionemu.gameserver.network.aion.serverpackets.SM_TARGET_UPDATE; import com.aionemu.gameserver.skillengine.action.DamageType; import com.aionemu.gameserver.skillengine.model.DashStatus; import com.aionemu.gameserver.skillengine.model.Effect; import com.aionemu.gameserver.utils.MathUtil; import com.aionemu.gameserver.utils.PacketSendUtility; import com.aionemu.gameserver.world.World; import com.aionemu.gameserver.world.geo.GeoService; /** * @author Sarynth, modified Bobobear */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "MoveBehindEffect") public class MoveBehindEffect extends DamageEffect { @Override public void applyEffect(Effect effect) { super.applyEffect(effect); } @Override public void calculate(Effect effect) { if (effect.getEffected() == null) { return; } if (!(effect.getEffector() instanceof Player)) { return; } final Player effector = (Player) effect.getEffector(); final Creature effected = effect.getEffected(); double radian = Math.toRadians(MathUtil.convertHeadingToDegree(effected.getHeading())); float x1 = (float) (Math.cos(Math.PI + radian) * 1.3F); float y1 = (float) (Math.sin(Math.PI + radian) * 1.3F); float z = GeoService.getInstance().getZAfterMoveBehind(effected.getWorldId(), effected.getX() + x1, effected.getY() + y1, effected.getZ(), effected.getInstanceId()); byte intentions = (byte) (CollisionIntention.PHYSICAL.getId() | CollisionIntention.DOOR.getId()); Vector3f closestCollision = GeoService.getInstance().getClosestCollision(effector, effected.getX() + x1, effected.getY() + y1, z, false, 
intentions); //stop moving effected.getMoveController().abortMove(); // Deselect targets PacketSendUtility.sendPacket(effector, new SM_TARGET_UPDATE(effector)); // Move Effector to Effected effect.setDashStatus(DashStatus.MOVEBEHIND); World.getInstance().updatePosition(effector, closestCollision.getX(), closestCollision.getY(), closestCollision.getZ(), effected.getHeading()); //set target position for SM_CASTSPELL_RESULT effect.getSkill().setTargetPosition(closestCollision.getX(), closestCollision.getY(), closestCollision.getZ(), effected.getHeading()); if (!super.calculate(effect, DamageType.PHYSICAL)) { return; } } }
GiGatR00n/Aion-Core-v4.7.5
AC-Game/src/com/aionemu/gameserver/skillengine/effect/MoveBehindEffect.java
Java
gpl-2.0
4,188
/* * ProFTPD: mod_sql_mysql -- Support for connecting to MySQL databases. * Copyright (c) 2001 Andrew Houghton * Copyright (c) 2004-2021 TJ Saunders * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA. * * As a special exemption, Andrew Houghton and other respective copyright * holders give permission to link this program with OpenSSL, and distribute * the resulting executable, without including the source code for OpenSSL in * the source distribution. * * -----DO NOT EDIT----- * $Libraries: -lm -lmysqlclient -lz$ */ /* INTRO: * * mod_sql_mysql is the reference backend module for mod_sql. As such, * it's very, very over-commented. * * COPYRIGHT NOTICE: * * The section of the copyright notice above that refers to OpenSSL *must* * be present in every backend module. Without that exemption the backend * module cannot legally be compiled into ProFTPD, even if the backend * module makes no use of OpenSSL. * * FUNCTIONS IN THIS CODE: * * Backend modules are only called into via the functions listed in * sql_cmdtable (see the end of this file). All other functions are * internal. * * For stylistic reasons, it's requested that backend authors maintain the * following conventions: * 1) when returning data in a modret_t, use the standard ProFTPD macros * whenever possible (ERR_MSG, HANDLED, etc.) 
* 2) although 'static modret_t *' and 'MODRET' are equivalent, please * use MODRET only for those functions listed in sql_cmdtable. * * NAMED CONNECTIONS: * * Backend modules need to handle named connections. A named connection * is the complete specification of how to access a database coupled with * a unique (to the session) descriptive name. Every call mod_sql makes * into a backend is directed at a particular named connection. * mod_sql_mysql includes a set of simplistic cache functions to keep an * internal map of names to connections -- other backends should feel free * to handle this however they want. * * OPEN/CLOSE SEMANTICS & CONNECTION COUNTING: * * Administrators using mod_sql decide on one of three connection policies: * 1) open a connection to the database and hold it open for the life of * the client process * 2) open a connection to the database and hold it open for the life of * each call * 3) open a connection to the database and hold it open until a specified * period of time has elapsed with no activity * * mod_sql enforces this choice by requiring that backends: * 1) wrap each call in an open/close bracket (so if a connection isn't * currently open, it will be opened for the call and closed afterwards) * 2) properly do connection counting to ensure that a connection is not * re-opened unnecessarily, and not closed too early. * * In simple terms: if an administrator chooses the "one connection for the * life of the process" policy, mod_sql will send an initial cmd_open call * for that connection at the start of the client session, and a final * cmd_close call when the session ends. If an administrator chooses the * "per-call" connection policy, the initial cmd_open and final cmd_close * calls will not be made. If an administrator chooses the "timeout" * connection policy, connections may be closed at any time and may need * to be reopened for any call. 
* * CONNECTION TIMERS * * Backends are required to handle connection timers; when a connection is * defined via cmd_defineconnection, a time value (in seconds) will be sent * with the definition. Given the complexity of the semantics, it's * recommended that backend authors simply copy the timer handling code from * this module. Timer handling code exists in nearly every function in this * module; read the code for more information. * * ERROR HANDLING AND LOGGING: * * Proper error handling is required of backend modules -- the modret_t * structure passed back to mod_sql should have the error fields correctly * filled. mod_sql handles backend errors by logging them then closing the * connection and the session. Therefore, it's not necessary for backends * to log errors which will be passed back to mod_sql, but they should log * any errors or useful information which will not be returned in the * modret_t. If an error is transient -- if there's any way for the backend * module to handle an error intelligently -- it should do so. mod_sql * will always handle backend errors by ending the client session. * * Good debug logging is encouraged -- major functions (the functions that * mod_sql calls directly) should be wrapped in 'entering' and 'exiting' * DEBUG_FUNC level output, the text of SQL queries should be visible with * DEBUG_INFO level output, and any errors should be visible with DEBUG_WARN * level output. * * Check the code if this makes no sense. * * COMMENTS / QUESTIONS: * * Backend module writers are encouraged to read through all comments in this * file. If anything is unclear, please contact the author. */ /* Internal define used for debug and logging. All backends are encouraged * to use the same format. 
*/ #define MOD_SQL_MYSQL_VERSION "mod_sql_mysql/4.0.9" #define _MYSQL_PORT "3306" #include "conf.h" #include "../contrib/mod_sql.h" #include <mysql.h> #include <stdbool.h> /* The my_make_scrambled_password{,_323} functions are not part of the public * MySQL API and are not declared in any of the MySQL header files. But the * use of these functions are required for implementing the "Backend" * SQLAuthType for MySQL. Thus these functions are declared here (Bug#3908). */ #if defined(HAVE_MYSQL_MY_MAKE_SCRAMBLED_PASSWORD) void my_make_scrambled_password(char *to, const char *from, size_t fromlen); #endif #if defined(HAVE_MYSQL_MY_MAKE_SCRAMBLED_PASSWORD_323) void my_make_scrambled_password_323(char *to, const char *from, size_t fromlen); #endif /* Timer-handling code adds the need for a couple of forward declarations. */ MODRET cmd_close(cmd_rec *cmd); module sql_mysql_module; /* * db_conn_struct: an internal struct to hold connection information. This * connection information is backend-specific; the members here reflect * the information MySQL needs for connections. * * Other backends are expected to make whatever changes are necessary. */ struct db_conn_struct { /* MySQL-specific members */ const char *host; const char *user; const char *pass; const char *db; const char *port; const char *unix_sock; /* For configuring the SSL/TLS session to the MySQL server. */ const char *ssl_cert_file; const char *ssl_key_file; const char *ssl_ca_file; const char *ssl_ca_dir; const char *ssl_ciphers; MYSQL *mysql; }; typedef struct db_conn_struct db_conn_t; /* * This struct is a wrapper for whatever backend data is needed to access * the database, and supports named connections, connection counting, and * timer handling. In most cases it should be enough for backend authors * to change db_conn_t and leave this struct alone. 
*/ struct conn_entry_struct { const char *name; void *data; /* Timer handling */ int timer; int ttl; /* Connection handling */ unsigned int connections; }; typedef struct conn_entry_struct conn_entry_t; #define DEF_CONN_POOL_SIZE 10 static pool *conn_pool = NULL; static array_header *conn_cache = NULL; static const char *trace_channel = "sql.mysql"; /* sql_get_connection: walks the connection cache looking for the named * connection. Returns NULL if unsuccessful, a pointer to the conn_entry_t * if successful. */ static conn_entry_t *sql_get_connection(const char *conn_name) { register unsigned int i; if (conn_name == NULL) { errno = EINVAL; return NULL; } /* walk the array looking for our entry */ for (i = 0; i < conn_cache->nelts; i++) { conn_entry_t *entry; entry = ((conn_entry_t **) conn_cache->elts)[i]; if (strcmp(conn_name, entry->name) == 0) { return entry; } } errno = ENOENT; return NULL; } /* sql_add_connection: internal helper function to maintain a cache of * connections. Since we expect the number of named connections to be small, * simply use an array header to hold them. We don't allow duplicate * connection names. * * Returns: NULL if the insertion was unsuccessful, a pointer to the * conn_entry_t that was created if successful. */ static void *sql_add_connection(pool *p, const char *name, db_conn_t *conn) { conn_entry_t *entry = NULL; if (name == NULL || conn == NULL || p == NULL) { errno = EINVAL; return NULL; } if (sql_get_connection(name) != NULL) { errno = EEXIST; return NULL; } entry = (conn_entry_t *) pcalloc(p, sizeof(conn_entry_t)); entry->name = pstrdup(p, name); entry->data = conn; *((conn_entry_t **) push_array(conn_cache)) = entry; return entry; } /* sql_check_cmd: tests to make sure the cmd_rec is valid and is properly * filled in. If not, it's grounds for the daemon to shutdown. 
*/ static void sql_check_cmd(cmd_rec *cmd, char *msg) { if (cmd == NULL || cmd->tmp_pool == NULL) { pr_log_pri(PR_LOG_ERR, MOD_SQL_MYSQL_VERSION ": '%s' was passed an invalid cmd_rec (internal bug); shutting down", msg); sql_log(DEBUG_WARN, "'%s' was passed an invalid cmd_rec (internal bug); " "shutting down", msg); pr_session_end(0); } return; } /* sql_timer_cb: when a timer goes off, this is the function that gets called. * This function makes assumptions about the db_conn_t members. */ static int sql_timer_cb(CALLBACK_FRAME) { register unsigned int i; for (i = 0; i < conn_cache->nelts; i++) { conn_entry_t *entry = NULL; entry = ((conn_entry_t **) conn_cache->elts)[i]; if ((unsigned long) entry->timer == p2) { cmd_rec *cmd = NULL; sql_log(DEBUG_INFO, "timer expired for connection '%s'", entry->name); cmd = sql_make_cmd(conn_pool, 2, entry->name, "1"); cmd_close(cmd); SQL_FREE_CMD(cmd); entry->timer = 0; } } return 0; } /* build_error: constructs a modret_t filled with error information; * mod_sql_mysql calls this function and returns the resulting mod_ret_t * whenever a call to the database results in an error. Other backends * may want to use a different method to return error information. */ static modret_t *build_error(cmd_rec *cmd, db_conn_t *conn) { char num[20] = {'\0'}; if (conn == NULL) { return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION, "badly formed request"); } pr_snprintf(num, 20, "%u", mysql_errno(conn->mysql)); return PR_ERROR_MSG(cmd, pstrdup(cmd->pool, num), pstrdup(cmd->pool, (char *) mysql_error(conn->mysql))); } /* build_data: both cmd_select and cmd_procedure potentially * return data to mod_sql; this function builds a modret to return * that data. This is MySQL specific; other backends may choose * to do things differently. 
*/ static modret_t *build_data(cmd_rec *cmd, db_conn_t *conn) { modret_t *mr = NULL; MYSQL *mysql = NULL; MYSQL_RES *result = NULL; MYSQL_ROW row; sql_data_t *sd = NULL; char **data = NULL; unsigned long cnt = 0; unsigned long i = 0; if (conn == NULL) { return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION, "badly formed request"); } mysql = conn->mysql; /* Would much rather use mysql_use_result here but without knowing * the number of rows returned we can't presize the data[] array. */ result = mysql_store_result(mysql); if (!result) { return build_error(cmd, conn); } sd = (sql_data_t *) pcalloc(cmd->tmp_pool, sizeof(sql_data_t)); sd->rnum = (unsigned long) mysql_num_rows(result); sd->fnum = (unsigned long) mysql_num_fields(result); cnt = sd->rnum * sd->fnum; data = (char **) pcalloc(cmd->tmp_pool, sizeof(char *) * (cnt + 1)); while ((row = mysql_fetch_row(result))) { for (cnt = 0; cnt < sd->fnum; cnt++) data[i++] = pstrdup(cmd->tmp_pool, row[cnt]); } /* At this point either we finished correctly or an error occurred in the * fetch. Do the right thing. */ if (mysql_errno(mysql) != 0) { mr = build_error(cmd, conn); mysql_free_result(result); return mr; } mysql_free_result(result); data[i] = NULL; sd->data = data; #ifdef CLIENT_MULTI_RESULTS /* We might be dealing with multiple result sets here, as when a stored * procedure was called which produced more results than we expect. * * We only want the first result set, so simply iterate through and free * up any remaining result sets. */ while (mysql_next_result(mysql) == 0) { pr_signals_handle(); result = mysql_store_result(mysql); mysql_free_result(result); } #endif return mod_create_data(cmd, (void *) sd); } /* * cmd_open: attempts to open a named connection to the database. * * Inputs: * cmd->argv[0]: connection name * * Returns: * either a properly filled error modret_t if a connection could not be * opened, or a simple non-error modret_t. 
 *
 * Notes:
 *  mod_sql depends on these semantics -- a backend should not open
 *  a connection unless mod_sql requests it, nor close one unless
 *  mod_sql requests it.  Connection counting is *REQUIRED* for complete
 *  compatibility; a connection should not be closed unless the count
 *  reaches 0, and ideally will not need to be re-opened for counts > 1.
 */
MODRET cmd_open(cmd_rec *cmd) {
  conn_entry_t *entry = NULL;
  db_conn_t *conn = NULL;
  unsigned long client_flags = CLIENT_INTERACTIVE;
#ifdef PR_USE_NLS
  const char *encoding = NULL;
#endif
#ifdef HAVE_MYSQL_MYSQL_GET_SSL_CIPHER
  const char *ssl_cipher = NULL;
#endif

  sql_log(DEBUG_FUNC, "%s", "entering \tmysql cmd_open");

  sql_check_cmd(cmd, "cmd_open");

  if (cmd->argc < 1) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_open");
    return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION, "badly formed request");
  }

  /* Look up the named connection; it must have been defined previously
   * via cmd_defineconnection.
   */
  entry = sql_get_connection(cmd->argv[0]);
  if (entry == NULL) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_open");
    return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION,
      pstrcat(cmd->tmp_pool, "unknown named connection: ", cmd->argv[0],
      NULL));
  }

  conn = (db_conn_t *) entry->data;

  /* If we're already open (connections > 0), AND our connection to MySQL
   * is still alive, increment the connection counter, reset our timer (if
   * we have one), and return HANDLED.
   */
  if (entry->connections > 0) {
    if (mysql_ping(conn->mysql) == 0) {
      entry->connections++;

      if (entry->timer) {
        pr_timer_reset(entry->timer, &sql_mysql_module);
      }

      sql_log(DEBUG_INFO, "connection '%s' count is now %d", entry->name,
        entry->connections);
      sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_open");
      return PR_HANDLED(cmd);

    } else {
      /* The server connection died while it was cached; reset the state
       * (count and timer) so a fresh connection can be attempted later.
       */
      sql_log(DEBUG_INFO, "lost connection to database: %s",
        mysql_error(conn->mysql));

      entry->connections = 0;
      if (entry->timer) {
        pr_timer_remove(entry->timer, &sql_mysql_module);
        entry->timer = 0;
      }

      sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_open");
      return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION,
        "lost connection to database");
    }
  }

  /* Make sure we have a new conn struct */
  conn->mysql = mysql_init(NULL);
  if (conn->mysql == NULL) {
    pr_log_pri(PR_LOG_ALERT, MOD_SQL_MYSQL_VERSION
      ": failed to allocate memory for MYSQL structure; shutting down");
    sql_log(DEBUG_WARN, "%s", "failed to allocate memory for MYSQL structure; "
      "shutting down");
    pr_session_end(0);
  }

  if (!(pr_sql_opts & SQL_OPT_IGNORE_CONFIG_FILE)) {
    /* Make sure the MySQL config files are read in.  This will read in
     * options from group "client" in the MySQL .cnf files.
     */
    mysql_options(conn->mysql, MYSQL_READ_DEFAULT_GROUP, "client");
  }

#if MYSQL_VERSION_ID >= 50013
  /* The MYSQL_OPT_RECONNECT option appeared in MySQL 5.0.13, according to
   *
   *  http://dev.mysql.com/doc/refman/5.0/en/auto-reconnect.html
   */
  if (!(pr_sql_opts & SQL_OPT_NO_RECONNECT)) {
#if MYSQL_VERSION_ID >= 80000
    /* MySQL 8.x removed the my_bool typedef; use the C99 bool instead. */
    bool reconnect = true;
#else
    my_bool reconnect = TRUE;
#endif
    mysql_options(conn->mysql, MYSQL_OPT_RECONNECT, &reconnect);
  }
#endif

#ifdef CLIENT_MULTI_RESULTS
  /* Enable mod_sql_mysql to deal with multiple result sets which may be
   * returned from calling stored procedures.
   */
  client_flags |= CLIENT_MULTI_RESULTS;
#endif

#if defined(HAVE_MYSQL_MYSQL_SSL_SET)
  /* Per the MySQL docs, this function always returns success.  Errors are
   * reported when we actually attempt to connect.
   *
   * Note: There are some other TLS-related options, in newer versions of
   * MySQL, which might be interest (although they require the use of the
   * mysql_options() function, not mysql_ssl_set()):
   *
   *  MYSQL_OPT_SSL_ENFORCE (boolean, defaults to 'false')
   *  MYSQL_OPT_SSL_VERIFY_SERVER_CERT (boolean, defaults to 'false')
   *  MYSQL_OPT_TLS_VERSION (char *, for configuring the protocol versions)
   */
  (void) mysql_ssl_set(conn->mysql, conn->ssl_key_file, conn->ssl_cert_file,
    conn->ssl_ca_file, conn->ssl_ca_dir, conn->ssl_ciphers);
#endif

  if (!mysql_real_connect(conn->mysql, conn->host, conn->user, conn->pass,
      conn->db, (int) strtol(conn->port, (char **) NULL, 10),
      conn->unix_sock, client_flags)) {
    modret_t *mr = NULL;

    /* If it didn't work, return an error. */
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_open");
    mr = build_error(cmd, conn);

    /* Since we failed to connect here, avoid a memory leak by freeing up the
     * mysql conn struct.
     */
    mysql_close(conn->mysql);
    conn->mysql = NULL;

    return mr;
  }

  sql_log(DEBUG_FUNC, "MySQL version ID: %d", MYSQL_VERSION_ID);
  sql_log(DEBUG_FUNC, "MySQL client version: %s", mysql_get_client_info());
  sql_log(DEBUG_FUNC, "MySQL server version: %s",
    mysql_get_server_info(conn->mysql));

# if MYSQL_VERSION_ID >= 50703 && defined(HAVE_MYSQL_GET_OPTION)
  /* Log the configured authentication plugin, if any.  For example, it
   * might be set in the my.cnf file using:
   *
   *  [client]
   *  default-auth = mysql_native_password
   *
   * Note: the mysql_get_option() function appeared in MySQL 5.7.3, as per:
   *
   *  https://dev.mysql.com/doc/refman/5.7/en/mysql-get-option.html
   *
   * The MYSQL_DEFAULT_AUTH value is an enum, not a #define, so we cannot
   * use a simple #ifdef here.
   */
  {
    const char *auth_plugin = NULL;

    if (mysql_get_option(conn->mysql, MYSQL_DEFAULT_AUTH, &auth_plugin) == 0) {
      /* There may not have been a default auth plugin explicitly configured,
       * and the MySQL internals themselves may not set one.  So it is not
       * surprising if the pointer remains null.
       */
      if (auth_plugin != NULL) {
        sql_log(DEBUG_FUNC, "MySQL client default authentication plugin: %s",
          auth_plugin);
      }
    }
  }
#endif /* MySQL 5.7.3 and later */

#if defined(HAVE_MYSQL_MYSQL_GET_SSL_CIPHER)
  ssl_cipher = mysql_get_ssl_cipher(conn->mysql);

  /* XXX Should we fail the connection here, if we expect an SSL session to
   * have been successfully completed/required?
   */

  if (ssl_cipher != NULL) {
    sql_log(DEBUG_FUNC, "%s", "MySQL SSL connection: true");
    sql_log(DEBUG_FUNC, "MySQL SSL cipher: %s", ssl_cipher);

  } else {
    sql_log(DEBUG_FUNC, "%s", "MySQL SSL connection: false");
  }
#endif

#if defined(PR_USE_NLS)
  encoding = pr_encode_get_encoding();
  if (encoding != NULL) {
# if MYSQL_VERSION_ID >= 50007

    /* Configure the connection for the current local character set.
     *
     * Note: the mysql_set_character_set() function appeared in MySQL 5.0.7,
     * as per:
     *
     *  http://dev.mysql.com/doc/refman/5.0/en/mysql-set-character-set.html
     *
     * Yes, even though the variable names say "charset", we (and MySQL,
     * though their documentation says otherwise) actually mean "encoding".
     */
    if (strcasecmp(encoding, "UTF-8") == 0) {
# if MYSQL_VERSION_ID >= 50503
      /* MySQL prefers the name "utf8mb4", not "UTF-8" */
      encoding = pstrdup(cmd->tmp_pool, "utf8mb4");
# else
      /* MySQL prefers the name "utf8", not "UTF-8" */
      encoding = pstrdup(cmd->tmp_pool, "utf8");
# endif /* MySQL before 5.5.3 */
    }

    if (mysql_set_character_set(conn->mysql, encoding) != 0) {
      /* Failing to set the character set should NOT be a fatal error.
       * There are situations where, due to client/server mismatch, the
       * requested character set may not be available.  Thus for now,
       * we simply log the failure.
       *
       * A future improvement might be to implement fallback behavior,
       * trying to set "older" character sets as needed.
       */
      sql_log(DEBUG_FUNC, MOD_SQL_MYSQL_VERSION
        ": failed to set character set '%s': %s (%u)", encoding,
        mysql_error(conn->mysql), mysql_errno(conn->mysql));
    }

    sql_log(DEBUG_FUNC, "MySQL connection character set now '%s' (from '%s')",
      mysql_character_set_name(conn->mysql), pr_encode_get_encoding());

# else
    /* No mysql_set_character_set() API available.  But
     * mysql_character_set_name() has been around for a while; we can use it
     * to at least see whether there might be a character set discrepancy.
     */

    const char *local_charset = pr_encode_get_encoding();
    const char *mysql_charset = mysql_character_set_name(conn->mysql);

    if (strcasecmp(mysql_charset, "utf8") == 0) {
      mysql_charset = pstrdup(cmd->tmp_pool, "UTF-8");
    }

    if (local_charset &&
        mysql_charset &&
        strcasecmp(local_charset, mysql_charset) != 0) {
      pr_log_pri(PR_LOG_ERR, MOD_SQL_MYSQL_VERSION
        ": local character set '%s' does not match MySQL character set '%s', "
        "SQL injection possible, shutting down", local_charset, mysql_charset);
      sql_log(DEBUG_WARN, "local character set '%s' does not match MySQL "
        "character set '%s', SQL injection possible, shutting down",
        local_charset, mysql_charset);
      pr_session_end(0);
    }
# endif /* older MySQL */
  }
#endif /* !PR_USE_NLS */

  /* bump connections */
  entry->connections++;

  if (pr_sql_conn_policy == SQL_CONN_POLICY_PERSESSION) {
    /* If the connection policy is PERSESSION... */
    if (entry->connections == 1) {
      /* ...and we are actually opening the first connection to the database;
       * we want to make sure this connection stays open, after this first use
       * (as per Bug#3290).  To do this, we re-bump the connection count.
       */
      entry->connections++;
    }

  } else if (entry->ttl > 0) {
    /* Set up our timer if necessary */

    entry->timer = pr_timer_add(entry->ttl, -1, &sql_mysql_module,
      sql_timer_cb, "mysql connection ttl");
    sql_log(DEBUG_INFO, "connection '%s' - %d second timer started",
      entry->name, entry->ttl);

    /* timed connections get re-bumped so they don't go away when cmd_close
     * is called.
     */
    entry->connections++;
  }

  /* return HANDLED */
  sql_log(DEBUG_INFO, "connection '%s' opened", entry->name);

  sql_log(DEBUG_INFO, "connection '%s' count is now %d", entry->name,
    entry->connections);

  pr_event_generate("mod_sql.db.connection-opened", &sql_mysql_module);

  sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_open");
  return PR_HANDLED(cmd);
}

/*
 * cmd_close: attempts to close the named connection.
 *
 * Inputs:
 *  cmd->argv[0]: connection name
 * Optional:
 *  cmd->argv[1]: close immediately
 *
 * Returns:
 *  either a properly filled error modret_t if a connection could not be
 *  closed, or a simple non-error modret_t.  For the case of mod_sql_mysql,
 *  there are no error codes returned by the close call; other backends
 *  may be able to return a useful error message.
 *
 * Notes:
 *  mod_sql depends on these semantics -- a backend should not open
 *  a connection unless mod_sql requests it, nor close one unless
 *  mod_sql requests it.  Connection counting is *REQUIRED* for complete
 *  compatibility; a connection should not be closed unless the count
 *  reaches 0, and should not need to be re-opened for counts > 1.
 *
 *  If argv[1] exists and is not NULL, the connection should be immediately
 *  closed and the connection count should be reset to 0.
 */
MODRET cmd_close(cmd_rec *cmd) {
  conn_entry_t *entry = NULL;
  db_conn_t *conn = NULL;

  sql_log(DEBUG_FUNC, "%s", "entering \tmysql cmd_close");

  sql_check_cmd(cmd, "cmd_close");

  if ((cmd->argc < 1) ||
      (cmd->argc > 2)) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_close");
    return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION, "badly formed request");
  }

  entry = sql_get_connection(cmd->argv[0]);
  if (entry == NULL) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_close");
    return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION,
      pstrcat(cmd->tmp_pool, "unknown named connection: ", cmd->argv[0],
      NULL));
  }

  conn = (db_conn_t *) entry->data;

  /* if we're closed already (connections == 0) return HANDLED */
  if (entry->connections == 0) {
    sql_log(DEBUG_INFO, "connection '%s' count is now %d", entry->name,
      entry->connections);

    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_close");
    return PR_HANDLED(cmd);
  }

  /* decrement connections.  If our count is 0 or we received a second arg
   * close the connection, explicitly set the counter to 0, and remove any
   * timers.
   */
  if (((--entry->connections) == 0) ||
      ((cmd->argc == 2) && (cmd->argv[1]))) {
    if (conn->mysql != NULL) {
      mysql_close(conn->mysql);
      conn->mysql = NULL;
    }

    entry->connections = 0;

    if (entry->timer) {
      pr_timer_remove(entry->timer, &sql_mysql_module);
      entry->timer = 0;
      sql_log(DEBUG_INFO, "connection '%s' - timer stopped", entry->name);
    }

    sql_log(DEBUG_INFO, "connection '%s' closed", entry->name);
    pr_event_generate("mod_sql.db.connection-closed", &sql_mysql_module);
  }

  sql_log(DEBUG_INFO, "connection '%s' count is now %d", entry->name,
    entry->connections);
  sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_close");

  return PR_HANDLED(cmd);
}

/* cmd_defineconnection: takes all information about a database
 * connection and stores it for later use.
 *
 * Inputs:
 *  cmd->argv[0]: connection name
 *  cmd->argv[1]: username portion of the SQLConnectInfo directive
 *  cmd->argv[2]: password portion of the SQLConnectInfo directive
 *  cmd->argv[3]: info portion of the SQLConnectInfo directive
 *
 * Optional:
 *  cmd->argv[4]: time-to-live in seconds
 *  cmd->argv[5]: SSL client cert file
 *  cmd->argv[6]: SSL client key file
 *  cmd->argv[7]: SSL CA file
 *  cmd->argv[8]: SSL CA directory
 *  cmd->argv[9]: SSL ciphers
 *
 * Returns:
 *  either a properly filled error modret_t if the connection could not
 *  defined, or a simple non-error modret_t.
 *
 * Notes:
 *  time-to-live is the length of time to allow a connection to remain unused;
 *  once that amount of time has passed, a connection should be closed and
 *  it's connection count should be reduced to 0.  If ttl is 0, or ttl is not
 *  a number or ttl is negative, the connection will be assumed to have no
 *  associated timer.
 */
MODRET cmd_defineconnection(cmd_rec *cmd) {
  char *have_host = NULL, *have_port = NULL, *info = NULL, *name = NULL;
  const char *db = NULL, *host = NULL, *port = NULL;
  const char *ssl_cert_file = NULL, *ssl_key_file = NULL, *ssl_ca_file = NULL;
  const char *ssl_ca_dir = NULL, *ssl_ciphers = NULL;
  conn_entry_t *entry = NULL;
  db_conn_t *conn = NULL;

  sql_log(DEBUG_FUNC, "%s", "entering \tmysql cmd_defineconnection");

  sql_check_cmd(cmd, "cmd_defineconnection");

  if (cmd->argc < 4 ||
      cmd->argc > 10 ||
      !cmd->argv[0]) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_defineconnection");
    return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION, "badly formed request");
  }

  if (conn_pool == NULL) {
    pr_log_pri(PR_LOG_WARNING, "WARNING: the mod_sql_mysql module has not been "
      "properly initialized. Please make sure your --with-modules configure "
      "option lists mod_sql *before* mod_sql_mysql, and recompile.");

    sql_log(DEBUG_FUNC, "%s", "The mod_sql_mysql module has not been properly "
      "initialized.  Please make sure your --with-modules configure option "
      "lists mod_sql *before* mod_sql_mysql, and recompile.");
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_defineconnection");

    return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION, "uninitialized module");
  }

  conn = (db_conn_t *) pcalloc(conn_pool, sizeof(db_conn_t));

  name = pstrdup(conn_pool, cmd->argv[0]);
  conn->user = pstrdup(conn_pool, cmd->argv[1]);
  conn->pass = pstrdup(conn_pool, cmd->argv[2]);

  info = cmd->argv[3];

  /* Work on a temporary copy of the info string; the '@' and ':' markers
   * are overwritten with NULs below to split it into db/host/port.
   */
  db = pstrdup(cmd->tmp_pool, info);

  have_host = strchr(db, '@');
  have_port = strchr(db, ':');

  /* If have_port, parse it, otherwise default it.
   * If have_port, set it to '\0'.
   *
   * If have_host, parse it, otherwise default it.
   * If have_host, set it to '\0'.
   */

  if (have_port != NULL) {
    port = have_port + 1;
    *have_port = '\0';

  } else {
    port = _MYSQL_PORT;
  }

  if (have_host != NULL) {
    host = have_host + 1;
    *have_host = '\0';

  } else {
    host = "localhost";
  }

  /* Hack to support ability to configure path to Unix domain socket
   * for MySQL: if the host string starts with a '/', assume it's
   * a path to the Unix domain socket to use.
   */
  if (*host == '/') {
    conn->unix_sock = pstrdup(conn_pool, host);

  } else {
    conn->host = pstrdup(conn_pool, host);
  }

  conn->db = pstrdup(conn_pool, db);
  conn->port = pstrdup(conn_pool, port);

  /* SSL parameters, if configured.
   */
  if (cmd->argc >= 6) {
    ssl_cert_file = cmd->argv[5];
    if (ssl_cert_file != NULL) {
      conn->ssl_cert_file = pstrdup(conn_pool, ssl_cert_file);
    }
  }

  if (cmd->argc >= 7) {
    ssl_key_file = cmd->argv[6];
    if (ssl_key_file != NULL) {
      conn->ssl_key_file = pstrdup(conn_pool, ssl_key_file);
    }
  }

  if (cmd->argc >= 8) {
    ssl_ca_file = cmd->argv[7];
    if (ssl_ca_file != NULL) {
      conn->ssl_ca_file = pstrdup(conn_pool, ssl_ca_file);
    }
  }

  if (cmd->argc >= 9) {
    ssl_ca_dir = cmd->argv[8];
    if (ssl_ca_dir != NULL) {
      conn->ssl_ca_dir = pstrdup(conn_pool, ssl_ca_dir);
    }
  }

  if (cmd->argc >= 10) {
    ssl_ciphers = cmd->argv[9];
    if (ssl_ciphers != NULL) {
      conn->ssl_ciphers = pstrdup(conn_pool, ssl_ciphers);
    }
  }

  /* Register the new connection; EEXIST means a connection with this name
   * was already defined, in which case the existing entry is reused.
   */
  entry = sql_add_connection(conn_pool, name, (void *) conn);
  if (entry == NULL &&
      errno == EEXIST) {
    /* Log only connections named other than "default", for debugging
     * misconfigurations with multiple different SQLNamedConnectInfo
     * directives using the same name.
     */
    if (strcmp(name, "default") != 0) {
      sql_log(DEBUG_FUNC, "named connection '%s' already exists", name);
    }

    entry = sql_get_connection(name);
  }

  if (entry == NULL) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_defineconnection");
    return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION,
      "error adding named connection");
  }

  if (cmd->argc >= 5) {
    entry->ttl = (int) strtol(cmd->argv[4], (char **) NULL, 10);
    if (entry->ttl >= 1) {
      pr_sql_conn_policy = SQL_CONN_POLICY_TIMER;

    } else {
      entry->ttl = 0;
    }
  }

  entry->timer = 0;
  entry->connections = 0;

  sql_log(DEBUG_INFO, "  name: '%s'", entry->name);
  sql_log(DEBUG_INFO, "  user: '%s'", conn->user);

  if (conn->host != NULL) {
    sql_log(DEBUG_INFO, "  host: '%s'", conn->host);

  } else if (conn->unix_sock != NULL) {
    sql_log(DEBUG_INFO, "socket: '%s'", conn->unix_sock);
  }

  sql_log(DEBUG_INFO, "    db: '%s'", conn->db);
  sql_log(DEBUG_INFO, "  port: '%s'", conn->port);
  sql_log(DEBUG_INFO, "   ttl: '%d'", entry->ttl);

  if (conn->ssl_cert_file != NULL) {
    sql_log(DEBUG_INFO, "   ssl: client cert = '%s'", conn->ssl_cert_file);
  }

  if (conn->ssl_key_file != NULL) {
    sql_log(DEBUG_INFO, "   ssl: client key = '%s'", conn->ssl_key_file);
  }

  if (conn->ssl_ca_file != NULL) {
    sql_log(DEBUG_INFO, "   ssl: CA file = '%s'", conn->ssl_ca_file);
  }

  if (conn->ssl_ca_dir != NULL) {
    sql_log(DEBUG_INFO, "   ssl: CA dir = '%s'", conn->ssl_ca_dir);
  }

  if (conn->ssl_ciphers != NULL) {
    sql_log(DEBUG_INFO, "   ssl: ciphers = '%s'", conn->ssl_ciphers);
  }

  sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_defineconnection");
  return PR_HANDLED(cmd);
}

/*
 * cmd_exit: closes all open connections.
 *
 * Inputs:
 *  None
 *
 * Returns:
 *  A simple non-error modret_t.
 */
static modret_t *cmd_exit(cmd_rec *cmd) {
  register unsigned int i = 0;

  sql_log(DEBUG_FUNC, "%s", "entering \tmysql cmd_exit");

  for (i = 0; i < conn_cache->nelts; i++) {
    conn_entry_t *entry;

    entry = ((conn_entry_t **) conn_cache->elts)[i];
    if (entry->connections > 0) {
      cmd_rec *close_cmd;

      /* Second arg "1" forces an immediate close, ignoring the refcount. */
      close_cmd = sql_make_cmd(conn_pool, 2, entry->name, "1");
      cmd_close(close_cmd);
      destroy_pool(close_cmd->pool);
    }
  }

  sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_exit");

  return PR_HANDLED(cmd);
}

/*
 * cmd_select: executes a SELECT query. properly constructing the query
 * based on the inputs.  See mod_sql.h for the definition of the _sql_data
 * structure which is used to return the result data.
 *
 * cmd_select takes either exactly two inputs, or more than two.  If only
 * two inputs are given, the second is a monolithic query string.  See
 * the examples below.
 *
 * Inputs:
 *  cmd->argv[0]: connection name
 *  cmd->argv[1]: table
 *  cmd->argv[2]: select string
 * Optional:
 *  cmd->argv[3]: where clause
 *  cmd->argv[4]: requested number of return rows (LIMIT)
 *
 *  etc.        : other options, such as "GROUP BY", "ORDER BY",
 *                and "DISTINCT" will start at cmd->arg[5].  All
 *                backends MUST support 'DISTINCT', the other
 *                arguments are optional (but encouraged).
 *
 * Returns:
 *  either a properly filled error modret_t if the select failed, or a
 *  modret_t with the result data filled in.
 *
 * Example:
 *  These are example queries that would be executed for MySQL; other
 *  backends will have different SQL syntax.
 *
 *  argv[] = "default","user","userid, count", "userid='aah'","2"
 *  query  = "SELECT userid, count FROM user WHERE userid='aah' LIMIT 2"
 *
 *  argv[] = "default","usr1, usr2","usr1.foo, usr2.bar"
 *  query  = "SELECT usr1.foo, usr2.bar FROM usr1, usr2"
 *
 *  argv[] = "default","usr1","foo",,,"DISTINCT"
 *  query  = "SELECT DISTINCT foo FROM usr1"
 *
 *  argv[] = "default","bar FROM usr1 WHERE tmp=1 ORDER BY bar"
 *  query  = "SELECT bar FROM usr1 WHERE tmp=1 ORDER BY bar"
 *
 * Notes:
 *  certain selects could return huge amounts of data.  do whatever is
 *  possible to minimize the amount of data copying here.
 */
MODRET cmd_select(cmd_rec *cmd) {
  conn_entry_t *entry = NULL;
  db_conn_t *conn = NULL;
  modret_t *cmr = NULL;
  modret_t *dmr = NULL;
  char *query = NULL;
  cmd_rec *close_cmd;

  sql_log(DEBUG_FUNC, "%s", "entering \tmysql cmd_select");

  sql_check_cmd(cmd, "cmd_select");

  if (cmd->argc < 2) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_select");
    return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION, "badly formed request");
  }

  entry = sql_get_connection(cmd->argv[0]);
  if (entry == NULL) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_select");
    return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION,
      pstrcat(cmd->tmp_pool, "unknown named connection: ", cmd->argv[0],
      NULL));
  }

  conn = (db_conn_t *) entry->data;

  /* Open the connection; cmd_open tracks the per-connection refcount. */
  cmr = cmd_open(cmd);
  if (MODRET_ERROR(cmr)) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_select");
    return cmr;
  }

  /* construct the query string */
  if (cmd->argc == 2) {
    query = pstrcat(cmd->tmp_pool, "SELECT ", cmd->argv[1], NULL);

  } else {
    /* Make sure to properly quote the table name, as it might be a reserved
     * keyword; see Issue #1212.
     */
    query = pstrcat(cmd->tmp_pool, cmd->argv[2], " FROM `", cmd->argv[1], "`",
      NULL);

    if (cmd->argc > 3 &&
        cmd->argv[3]) {
      query = pstrcat(cmd->tmp_pool, query, " WHERE ", cmd->argv[3], NULL);
    }

    if (cmd->argc > 4 &&
        cmd->argv[4]) {
      query = pstrcat(cmd->tmp_pool, query, " LIMIT ", cmd->argv[4], NULL);
    }

    if (cmd->argc > 5) {
      register unsigned int i;

      /* Handle the optional arguments -- they're rare, so in this case
       * we'll play with the already constructed query string, but in
       * general we should probably take optional arguments into account
       * and put the query string together later once we know what they are.
       */
      for (i = 5; i < cmd->argc; i++) {
        if (cmd->argv[i] != NULL &&
            strcasecmp("DISTINCT", cmd->argv[i]) == 0) {
          query = pstrcat(cmd->tmp_pool, "DISTINCT ", query, NULL);
        }
      }
    }

    query = pstrcat(cmd->tmp_pool, "SELECT ", query, NULL);
  }

  /* Log the query string */
  sql_log(DEBUG_INFO, "query \"%s\"", query);

  /* Perform the query.  if it doesn't work, log the error, close the
   * connection then return the error from the query processing.
   */
  if (mysql_real_query(conn->mysql, query, strlen(query)) != 0) {
    dmr = build_error(cmd, conn);

    close_cmd = sql_make_cmd(cmd->tmp_pool, 1, entry->name);
    cmd_close(close_cmd);
    SQL_FREE_CMD(close_cmd);

    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_select");
    return dmr;
  }

  /* Get the data. if it doesn't work, log the error, close the
   * connection then return the error from the data processing.
   */
  dmr = build_data(cmd, conn);
  if (MODRET_ERROR(dmr)) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_select");

    close_cmd = sql_make_cmd(cmd->tmp_pool, 1, entry->name);
    cmd_close(close_cmd);
    SQL_FREE_CMD(close_cmd);

    return dmr;
  }

  /* close the connection, return the data. */
  close_cmd = sql_make_cmd(cmd->tmp_pool, 1, entry->name);
  cmd_close(close_cmd);
  SQL_FREE_CMD(close_cmd);

  sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_select");
  return dmr;
}

/*
 * cmd_insert: executes an INSERT query, properly constructing the query
 * based on the inputs.
 *
 * cmd_insert takes either exactly two inputs, or exactly four.  If only
 * two inputs are given, the second is a monolithic query string.  See
 * the examples below.
 *
 * Inputs:
 *  cmd->argv[0]: connection name
 *  cmd->argv[1]: table
 *  cmd->argv[2]: field string
 *  cmd->argv[3]: value string
 *
 * Returns:
 *  either a properly filled error modret_t if the insert failed, or a
 *  simple non-error modret_t.
 *
 * Example:
 *  These are example queries that would be executed for MySQL; other
 *  backends will have different SQL syntax.
 *
 *  argv[] = "default","log","userid, date, count", "'aah', now(), 2"
 *  query  = "INSERT INTO log (userid, date, count) VALUES ('aah', now(), 2)"
 *
 *  argv[] = "default"," INTO foo VALUES ('do','re','mi','fa')"
 *  query  = "INSERT INTO foo VALUES ('do','re','mi','fa')"
 *
 * Notes:
 *  none
 */
MODRET cmd_insert(cmd_rec *cmd) {
  conn_entry_t *entry = NULL;
  db_conn_t *conn = NULL;
  modret_t *cmr = NULL;
  modret_t *dmr = NULL;
  char *query = NULL;
  cmd_rec *close_cmd;

  sql_log(DEBUG_FUNC, "%s", "entering \tmysql cmd_insert");

  sql_check_cmd(cmd, "cmd_insert");

  if ((cmd->argc != 2) &&
      (cmd->argc != 4)) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_insert");
    return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION, "badly formed request");
  }

  entry = sql_get_connection(cmd->argv[0]);
  if (entry == NULL) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_insert");
    return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION,
      pstrcat(cmd->tmp_pool, "unknown named connection: ", cmd->argv[0],
      NULL));
  }

  conn = (db_conn_t *) entry->data;

  cmr = cmd_open(cmd);
  if (MODRET_ERROR(cmr)) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_insert");
    return cmr;
  }

  /* construct the query string */
  if (cmd->argc == 2) {
    query = pstrcat(cmd->tmp_pool, "INSERT ", cmd->argv[1], NULL);

  } else {
    /* Make sure to properly quote the table name, as it might be a reserved
     * keyword; see Issue #1212.
     */
    query = pstrcat(cmd->tmp_pool, "INSERT INTO `", cmd->argv[1], "` (",
      cmd->argv[2], ") VALUES (", cmd->argv[3], ")", NULL);
  }

  /* Log the query string */
  sql_log(DEBUG_INFO, "query \"%s\"", query);

  /* perform the query.  if it doesn't work, log the error, close the
   * connection (and log any errors there, too) then return the error
   * from the query processing.
   */
  if (mysql_real_query(conn->mysql, query, strlen(query)) != 0) {
    dmr = build_error(cmd, conn);

    close_cmd = sql_make_cmd(cmd->tmp_pool, 1, entry->name);
    cmd_close(close_cmd);
    SQL_FREE_CMD(close_cmd);

    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_insert");
    return dmr;
  }

  /* close the connection and return HANDLED. */
  close_cmd = sql_make_cmd(cmd->tmp_pool, 1, entry->name);
  cmd_close(close_cmd);
  SQL_FREE_CMD(close_cmd);

  sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_insert");
  return PR_HANDLED(cmd);
}

/*
 * cmd_update: executes an UPDATE query, properly constructing the query
 * based on the inputs.
 *
 * cmd_update takes either exactly two, three, or four inputs.  If only
 * two inputs are given, the second is a monolithic query string.  See
 * the examples below.
 *
 * Inputs:
 *  cmd->argv[0]: connection name
 *  cmd->argv[1]: table
 *  cmd->argv[2]: update string
 * Optional:
 *  cmd->argv[3]: where string
 *
 * Returns:
 *  either a properly filled error modret_t if the update failed, or a
 *  simple non-error modret_t.
 *
 *
 * Example:
 *  These are example queries that would be executed for MySQL; other
 *  backends will have different SQL syntax.
 *
 *  argv[] = "default","user","count=count+1", "userid='joesmith'"
 *  query  = "UPDATE user SET count=count+1 WHERE userid='joesmith'"
 *
 * Notes:
 *  argv[3] is optional -- it may be NULL, or it may not exist at all.
 *  make sure this is handled correctly.
 */
MODRET cmd_update(cmd_rec *cmd) {
  conn_entry_t *entry = NULL;
  db_conn_t *conn = NULL;
  modret_t *cmr = NULL;
  modret_t *dmr = NULL;
  char *query = NULL;
  cmd_rec *close_cmd;

  sql_log(DEBUG_FUNC, "%s", "entering \tmysql cmd_update");

  sql_check_cmd(cmd, "cmd_update");

  if ((cmd->argc < 2) ||
      (cmd->argc > 4)) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_update");
    return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION, "badly formed request");
  }

  entry = sql_get_connection(cmd->argv[0]);
  if (entry == NULL) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_update");
    return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION,
      pstrcat(cmd->tmp_pool, "unknown named connection: ", cmd->argv[0],
      NULL));
  }

  conn = (db_conn_t *) entry->data;

  cmr = cmd_open(cmd);
  if (MODRET_ERROR(cmr)) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_update");
    return cmr;
  }

  /* construct the query string */
  if (cmd->argc == 2) {
    query = pstrcat(cmd->tmp_pool, "UPDATE ", cmd->argv[1], NULL);

  } else {
    /* Make sure to properly quote the table name, as it might be a reserved
     * keyword; see Issue #1212.
     */
    query = pstrcat(cmd->tmp_pool, "UPDATE `", cmd->argv[1], "` SET ",
      cmd->argv[2], NULL);

    if (cmd->argc > 3 &&
        cmd->argv[3]) {
      query = pstrcat(cmd->tmp_pool, query, " WHERE ", cmd->argv[3], NULL);
    }
  }

  /* Log the query string */
  sql_log(DEBUG_INFO, "query \"%s\"", query);

  /* Perform the query.  if it doesn't work close the connection, then
   * return the error from the query processing.
   */
  if (mysql_real_query(conn->mysql, query, strlen(query)) != 0) {
    dmr = build_error(cmd, conn);

    close_cmd = sql_make_cmd(cmd->tmp_pool, 1, entry->name);
    cmd_close(close_cmd);
    SQL_FREE_CMD(close_cmd);

    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_update");
    return dmr;
  }

  /* Close the connection, return HANDLED. */
  close_cmd = sql_make_cmd(cmd->tmp_pool, 1, entry->name);
  cmd_close(close_cmd);
  SQL_FREE_CMD(close_cmd);

  sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_update");
  return PR_HANDLED(cmd);
}

/*
 * cmd_procedure: executes a stored procedure.
 *
 * Inputs:
 *  cmd->argv[0]: connection name
 *  cmd->argv[1]: procedure name
 *  cmd->argv[2]: procedure string
 *
 * Returns:
 *  either a properly filled error modret_t if the procedure failed in
 *  some way, or a modret_t with the result data.  If a procedure
 *  returns data, it should be returned in the same way as cmd_select.
 *
 * Notes:
 *  not every backend will support stored procedures.  Backends which do
 *  not support stored procedures should return an error with a descriptive
 *  error message (something like 'backend does not support procedures').
 */
MODRET cmd_procedure(cmd_rec *cmd) {
  sql_log(DEBUG_FUNC, "%s", "entering \tmysql cmd_procedure");

  sql_check_cmd(cmd, "cmd_procedure");

  if (cmd->argc != 3) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_procedure");
    return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION, "badly formed request");
  }

  /* Stored procedure support is not implemented by this backend; always
   * return the "unsupported" error expected by mod_sql.
   */

  sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_procedure");

  return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION,
    "backend does not support procedures");
}

/*
 * cmd_query: executes a freeform query string, with no syntax checking.
 *
 * cmd_query takes exactly two inputs, the connection and the query string.
 *
 * Inputs:
 *  cmd->argv[0]: connection name
 *  cmd->argv[1]: query string
 *
 * Returns:
 *  depending on the query type, returns a modret_t with data, a non-error
 *  modret_t, or a properly filled error modret_t if the query failed.
 *
 * Example:
 *  None.  The query should be passed directly to the backend database.
 *
 * Notes:
 *  None.
 */
MODRET cmd_query(cmd_rec *cmd) {
  conn_entry_t *entry = NULL;
  db_conn_t *conn = NULL;
  modret_t *cmr = NULL;
  modret_t *dmr = NULL;
  char *query = NULL;
  cmd_rec *close_cmd;

  sql_log(DEBUG_FUNC, "%s", "entering \tmysql cmd_query");

  sql_check_cmd(cmd, "cmd_query");

  if (cmd->argc != 2) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_query");
    return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION, "badly formed request");
  }

  entry = sql_get_connection(cmd->argv[0]);
  if (entry == NULL) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_query");
    return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION,
      pstrcat(cmd->tmp_pool, "unknown named connection: ", cmd->argv[0],
      NULL));
  }

  conn = (db_conn_t *) entry->data;

  cmr = cmd_open(cmd);
  if (MODRET_ERROR(cmr)) {
    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_query");
    return cmr;
  }

  query = pstrcat(cmd->tmp_pool, cmd->argv[1], NULL);

  /* Log the query string */
  sql_log(DEBUG_INFO, "query \"%s\"", query);

  /* Perform the query.  if it doesn't work close the connection, then
   * return the error from the query processing.
   */
  if (mysql_real_query(conn->mysql, query, strlen(query)) != 0) {
    dmr = build_error(cmd, conn);

    close_cmd = sql_make_cmd(cmd->tmp_pool, 1, entry->name);
    cmd_close(close_cmd);
    SQL_FREE_CMD(close_cmd);

    sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_query");
    return dmr;
  }

  /* Get data if necessary. if it doesn't work, log the error, close the
   * connection then return the error from the data processing.
   */
  if (mysql_field_count(conn->mysql) > 0) {
    dmr = build_data(cmd, conn);
    if (MODRET_ERROR(dmr)) {
      sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_query");
    }

  } else {
    /* No result set (e.g. for INSERT/UPDATE/DELETE); report success. */
    dmr = PR_HANDLED(cmd);
  }

  /* close the connection, return the data.
   */
  close_cmd = sql_make_cmd(cmd->tmp_pool, 1, entry->name);
  cmd_close(close_cmd);
  SQL_FREE_CMD(close_cmd);

  sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_query");
  return dmr;
}

/*
 * cmd_escapestring: certain strings sent to a database should be properly
 * escaped -- for instance, quotes need to be escaped to insure that
 * a query string is properly formatted.  cmd_escapestring does whatever
 * is necessary to escape the special characters in a string.
 *
 * Inputs:
 *  cmd->argv[0]: connection name
 *  cmd->argv[1]: string to escape
 *
 * Returns:
 *  this command CANNOT fail.  The return string is null-terminated and
 *  stored in the data field of the modret_t structure.
 *
 * Notes:
 *  Different languages may escape different characters in different ways.
 *  A backend should handle this correctly, where possible.  If there is
 *  no client library function to do the string conversion, it is strongly
 *  recommended that the backend module writer do whatever is necessry (read
 *  the database documentation and figure it out) to do the conversion
 *  themselves in this function.
 *
 *  A backend MUST supply a working escapestring implementation.  Simply
 *  copying the data from argv[0] into the data field of the modret allows
 *  for possible SQL injection attacks when this backend is used.
*/ MODRET cmd_escapestring(cmd_rec * cmd) { conn_entry_t *entry = NULL; db_conn_t *conn = NULL; modret_t *cmr = NULL; char *unescaped = NULL; char *escaped = NULL; cmd_rec *close_cmd; sql_log(DEBUG_FUNC, "%s", "entering \tmysql cmd_escapestring"); sql_check_cmd(cmd, "cmd_escapestring"); if (cmd->argc != 2) { sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_escapestring"); return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION, "badly formed request"); } entry = sql_get_connection(cmd->argv[0]); if (entry == NULL) { sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_escapestring"); return PR_ERROR_MSG(cmd, MOD_SQL_MYSQL_VERSION, pstrcat(cmd->tmp_pool, "unknown named connection: ", cmd->argv[0], NULL)); } conn = (db_conn_t *) entry->data; /* Make sure the connection is open. */ cmr = cmd_open(cmd); if (MODRET_ERROR(cmr)) { sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_escapestring"); return cmr; } unescaped = cmd->argv[1]; escaped = (char *) pcalloc(cmd->tmp_pool, sizeof(char) * (strlen(unescaped) * 2) + 1); /* Note: the mysql_real_escape_string() function appeared in the C API * as of MySQL 3.23.14; this macro allows functioning with older mysql * installations. */ #if MYSQL_VERSION_ID >= 32314 mysql_real_escape_string(conn->mysql, escaped, unescaped, strlen(unescaped)); #else mysql_escape_string(escaped, unescaped, strlen(unescaped)); #endif close_cmd = sql_make_cmd(cmd->tmp_pool, 1, entry->name); cmd_close(close_cmd); SQL_FREE_CMD(close_cmd); sql_log(DEBUG_FUNC, "%s", "exiting \tmysql cmd_escapestring"); return mod_create_data(cmd, (void *) escaped); } /* Per the MySQL docs for the PASSWORD function, MySQL pre-4.1 passwords * are always 16 bytes; MySQL 4.1 passwords are 41 bytes AND start with '*'. 
* See: * http://dev.mysql.com/doc/refman/5.7/en/encryption-functions.html#function_password */ #define MYSQL_PASSWD_FMT_UNKNOWN -1 #define MYSQL_PASSWD_FMT_PRE41 1 #define MYSQL_PASSWD_FMT_41 2 #define MYSQL_PASSWD_FMT_SHA256 3 static int get_mysql_passwd_fmt(const char *txt, size_t txt_len) { if (txt_len == 16) { return MYSQL_PASSWD_FMT_PRE41; } if (txt_len == 41 && txt[0] == '*') { return MYSQL_PASSWD_FMT_41; } if (txt_len > 3 && txt[0] == '$' && txt[1] == '5' && txt[2] == '$') { return MYSQL_PASSWD_FMT_SHA256; } return MYSQL_PASSWD_FMT_UNKNOWN; } static int match_mysql_passwds(const char *hashed, size_t hashed_len, const char *scrambled, size_t scrambled_len, const char *scramble_func) { int hashed_fmt = 0, scrambled_fmt = 0, matched = FALSE; if (pr_trace_get_level(trace_channel) >= 7) { const char *hashed_fmt_name, *scrambled_fmt_name; hashed_fmt = get_mysql_passwd_fmt(hashed, hashed_len); scrambled_fmt = get_mysql_passwd_fmt(scrambled, scrambled_len); switch (hashed_fmt) { case MYSQL_PASSWD_FMT_PRE41: hashed_fmt_name = "pre-4.1"; break; case MYSQL_PASSWD_FMT_41: hashed_fmt_name = "4.1"; break; case MYSQL_PASSWD_FMT_SHA256: hashed_fmt_name = "SHA256"; break; default: hashed_fmt_name = "unknown"; break; } switch (scrambled_fmt) { case MYSQL_PASSWD_FMT_PRE41: scrambled_fmt_name = "pre-4.1"; break; case MYSQL_PASSWD_FMT_41: scrambled_fmt_name = "4.1"; break; case MYSQL_PASSWD_FMT_SHA256: scrambled_fmt_name = "SHA256"; break; default: scrambled_fmt_name = "unknown"; break; } pr_trace_msg(trace_channel, 7, "SQLAuthType Backend: database password format = %s, " "client library password format = %s (using %s())", hashed_fmt_name, scrambled_fmt_name, scramble_func); } /* Note here that if the scrambled value has a different length than our * expected hash, it might be a completely different format (i.e. not the * 4.1 or whatever format provided by the db). Log if this the case! 
* * Consider that using PASSWORD() on the server might make a 4.1 format * value, but the client lib might make a SHA256 format value. Or * vice versa. */ if (scrambled_len == hashed_len) { matched = (strncmp(scrambled, hashed, hashed_len) == 0); } if (matched == FALSE) { if (hashed_fmt == 0) { hashed_fmt = get_mysql_passwd_fmt(hashed, hashed_len); } if (scrambled_fmt == 0) { scrambled_fmt = get_mysql_passwd_fmt(scrambled, scrambled_len); } if (hashed_fmt != scrambled_fmt) { if (scrambled_fmt == MYSQL_PASSWD_FMT_SHA256) { sql_log(DEBUG_FUNC, "MySQL client library used MySQL SHA256 password format, and Backend SQLAuthType cannot succeed; consider using MD5/SHA1/SHA256 SQLAuthType using mod_sql_passwd"); switch (hashed_fmt) { case MYSQL_PASSWD_FMT_PRE41: sql_log(DEBUG_FUNC, "MySQL server used MySQL pre-4.1 password format for PASSWORD() value"); break; case MYSQL_PASSWD_FMT_41: sql_log(DEBUG_FUNC, "MySQL server used MySQL 4.1 password format for PASSWORD() value"); break; default: pr_trace_msg(trace_channel, 19, "unknown MySQL PASSWORD() format used on server"); break; } } } pr_trace_msg(trace_channel, 9, "expected '%.*s' (%lu), got '%.*s' (%lu) using MySQL %s()", (int) hashed_len, hashed, (unsigned long) hashed_len, (int) scrambled_len, scrambled, (unsigned long) scrambled_len, scramble_func); } return matched; } static modret_t *sql_mysql_password(cmd_rec *cmd, const char *plaintext, const char *ciphertext) { char scrambled[256] = {'\0'}; size_t plaintext_len = 0, ciphertext_len = 0, scrambled_len = 0; int success = 0; plaintext_len = strlen(plaintext); ciphertext_len = strlen(ciphertext); /* Checking order (damn MySQL API changes): * * my_make_scrambled_password (if available) * my_make_scrambled_password_323 (if available) * make_scrambled_password (if available) * make_scrammbed_password_323 (if available) */ #if defined(HAVE_MYSQL_MY_MAKE_SCRAMBLED_PASSWORD) if (success == FALSE) { memset(scrambled, '\0', sizeof(scrambled)); 
my_make_scrambled_password(scrambled, plaintext, plaintext_len); scrambled_len = strlen(scrambled); success = match_mysql_passwds(ciphertext, ciphertext_len, scrambled, scrambled_len, "my_make_scrambled_password"); } #endif /* HAVE_MYSQL_MY_MAKE_SCRAMBLED_PASSWORD */ #if defined(HAVE_MYSQL_MY_MAKE_SCRAMBLED_PASSWORD_323) if (success == FALSE) { memset(scrambled, '\0', sizeof(scrambled)); sql_log(DEBUG_FUNC, "%s", "checking again using deprecated legacy MySQL password algorithm (my_make_scrambled_password_323 function)"); sql_log(DEBUG_FUNC, "%s", "warning: support for this legacy MySQ-3.xL password algorithm will be dropped from MySQL in the future"); my_make_scrambled_password_323(scrambled, plaintext, plaintext_len); scrambled_len = strlen(scrambled); success = match_mysql_passwds(ciphertext, ciphertext_len, scrambled, scrambled_len, "my_make_scrambled_password_323"); } #endif /* HAVE_MYSQL_MY_MAKE_SCRAMBLED_PASSWORD_323 */ #if defined(HAVE_MYSQL_MAKE_SCRAMBLED_PASSWORD) if (success == FALSE) { memset(scrambled, '\0', sizeof(scrambled)); # if MYSQL_VERSION_ID >= 40100 && MYSQL_VERSION_ID < 40101 make_scrambled_password(scrambled, plaintext, 1, NULL); # else make_scrambled_password(scrambled, plaintext); # endif scrambled_len = strlen(scrambled); success = match_mysql_passwds(ciphertext, ciphertext_len, scrambled, scrambled_len, "make_scrambled_password"); } #endif /* HAVE_MYSQL_MAKE_SCRAMBLED_PASSWORD */ #if defined(HAVE_MYSQL_MAKE_SCRAMBLED_PASSWORD_323) if (success == FALSE) { memset(scrambled, '\0', sizeof(scrambled)); sql_log(DEBUG_FUNC, "%s", "checking again using deprecated legacy MySQL password algorithm (make_scrambled_password_323 function)"); sql_log(DEBUG_FUNC, "%s", "warning: support for this legacy MySQ-3.xL password algorithm will be dropped from MySQL in the future"); make_scrambled_password_323(scrambled, plaintext); scrambled_len = strlen(scrambled); success = match_mysql_passwds(ciphertext, ciphertext_len, scrambled, scrambled_len, 
"make_scrambled_password_323"); } #endif /* HAVE_MYSQL_MAKE_SCRAMBLED_PASSWORD_323 */ if (success == FALSE) { sql_log(DEBUG_FUNC, "%s", "password mismatch"); } return success ? PR_HANDLED(cmd) : PR_ERROR_INT(cmd, PR_AUTH_BADPWD); } /* * cmd_identify: returns API information and an identification string for * the backend handler. mod_sql will call this at initialization and * display the identification string. The API version information is * used by mod_sql to identify available command handlers. * * Inputs: * None. The cmd->tmp_pool can be used to construct the return data, but * do not depend on any other portion of the cmd_rec to be useful in any way. * * Returns: * A sql_data_t of *exactly* this form: * sql_data_t->rnum = 1; * sql_data_t->fnum = 2; * sql_data_t->data[0] = "identification string" * sql_data_t->data[0] = "API version" * * Notes: * See mod_sql.h for currently accepted APIs. */ MODRET cmd_identify(cmd_rec * cmd) { sql_data_t *sd = NULL; sql_check_cmd(cmd, "cmd_identify"); sd = (sql_data_t *) pcalloc(cmd->tmp_pool, sizeof(sql_data_t)); sd->data = (char **) pcalloc(cmd->tmp_pool, sizeof(char *) * 2); sd->rnum = 1; sd->fnum = 2; sd->data[0] = MOD_SQL_MYSQL_VERSION; sd->data[1] = MOD_SQL_API_V1; return mod_create_data(cmd, (void *) sd); } /* * cmd_prepare: prepares this mod_sql_mysql module for running. * * Inputs: * cmd->argv[0]: A pool to be used for any necessary preparations. * * Returns: * Success. */ MODRET cmd_prepare(cmd_rec *cmd) { if (cmd->argc != 1) { return PR_ERROR(cmd); } conn_pool = (pool *) cmd->argv[0]; if (conn_cache == NULL) { conn_cache = make_array(conn_pool, DEF_CONN_POOL_SIZE, sizeof(conn_entry_t *)); } return mod_create_data(cmd, NULL); } /* * cmd_cleanup: cleans up any initialisations made during module preparations * (see cmd_prepre). * * Inputs: * None. * * Returns: * Success. 
 */
MODRET cmd_cleanup(cmd_rec *cmd) {
  /* Destroy the connection pool; the cached connection entries live in it
   * and go away with it.
   */
  destroy_pool(conn_pool);
  conn_pool = NULL;
  conn_cache = NULL;

  return mod_create_data(cmd, NULL);
}

/* SQL cmdtable: mod_sql requires each backend module to define a cmdtable
 * with this exact name.  ALL these functions must be defined; mod_sql checks
 * that they all exist on startup and ProFTPD will refuse to start if they
 * aren't defined.
 */
static cmdtable sql_mysql_cmdtable[] = {
  { CMD, "sql_close",            G_NONE, cmd_close,            FALSE, FALSE },
  { CMD, "sql_cleanup",          G_NONE, cmd_cleanup,          FALSE, FALSE },
  { CMD, "sql_defineconnection", G_NONE, cmd_defineconnection, FALSE, FALSE },
  { CMD, "sql_escapestring",     G_NONE, cmd_escapestring,     FALSE, FALSE },
  { CMD, "sql_exit",             G_NONE, cmd_exit,             FALSE, FALSE },
  { CMD, "sql_identify",         G_NONE, cmd_identify,         FALSE, FALSE },
  { CMD, "sql_insert",           G_NONE, cmd_insert,           FALSE, FALSE },
  { CMD, "sql_open",             G_NONE, cmd_open,             FALSE, FALSE },
  { CMD, "sql_prepare",          G_NONE, cmd_prepare,          FALSE, FALSE },
  { CMD, "sql_procedure",        G_NONE, cmd_procedure,        FALSE, FALSE },
  { CMD, "sql_query",            G_NONE, cmd_query,            FALSE, FALSE },
  { CMD, "sql_select",           G_NONE, cmd_select,           FALSE, FALSE },
  { CMD, "sql_update",           G_NONE, cmd_update,           FALSE, FALSE },

  { 0, NULL }
};

/* Configuration handlers */

/* SQLAuthTypes handler: only emits a warning for incompatible combinations;
 * always declines so that mod_sql's own handler still processes the
 * directive.
 */
MODRET set_sqlauthtypes(cmd_rec *cmd) {
#if MYSQL_VERSION_ID >= 50600 && \
    !defined(HAVE_MYSQL_MAKE_SCRAMBLED_PASSWORD) && \
    !defined(HAVE_MYSQL_MAKE_SCRAMBLED_PASSWORD_323) && \
    !defined(HAVE_MYSQL_MY_MAKE_SCRAMBLED_PASSWORD_323)
  register unsigned int i;

  /* If we are using MySQL 5.6.x or later, AND we only have the
   * my_make_scrambled_password() MySQL function available, AND the Backend
   * SQLAuthType is used, then we must fail the directive; see Bug#4281.
   */
  for (i = 1; i < cmd->argc; i++) {
    const char *auth_type;

    auth_type = cmd->argv[i];
    if (strcasecmp(auth_type, "Backend") == 0) {
      pr_log_pri(PR_LOG_NOTICE, "%s: WARNING: MySQL client library uses MySQL SHA256 password format, and Backend SQLAuthType cannot succeed; consider using MD5/SHA1/SHA256 SQLAuthType using mod_sql_passwd", (char *) cmd->argv[0]);
      break;
    }
  }
#endif

  return PR_DECLINED(cmd);
}

/* Event handlers */

static void sql_mysql_mod_load_ev(const void *event_data, void *user_data) {

  if (strcmp("mod_sql_mysql.c", (const char *) event_data) == 0) {
    /* Register ourselves with mod_sql. */
    if (sql_register_backend("mysql", sql_mysql_cmdtable) < 0) {
      pr_log_pri(PR_LOG_NOTICE, MOD_SQL_MYSQL_VERSION
        ": notice: error registering backend: %s", strerror(errno));

      /* Registration failed: this backend is unusable, so end the session. */
      pr_session_end(0);
    }
  }
}

static void sql_mysql_mod_unload_ev(const void *event_data, void *user_data) {

  if (strcmp("mod_sql_mysql.c", (const char *) event_data) == 0) {
    /* Unregister ourselves from all events. */
    pr_event_unregister(&sql_mysql_module, NULL, NULL);

    /* Unregister ourselves with mod_sql. */
    (void) sql_unregister_authtype("Backend");

    if (sql_unregister_backend("mysql") < 0) {
      pr_log_pri(PR_LOG_NOTICE, MOD_SQL_MYSQL_VERSION
        ": notice: error unregistering backend: %s", strerror(errno));

      pr_session_end(0);
    }
  }
}

/* Initialization routines */

static int sql_mysql_init(void) {

  /* Register listeners for the load and unload events. */
  pr_event_register(&sql_mysql_module, "core.module-load",
    sql_mysql_mod_load_ev, NULL);
  pr_event_register(&sql_mysql_module, "core.module-unload",
    sql_mysql_mod_unload_ev, NULL);

  /* Register our auth handler. */
  (void) sql_register_authtype("Backend", sql_mysql_password);
  return 0;
}

static int sql_mysql_sess_init(void) {
  /* Discard any pool left over from the daemon/parent process; the session
   * gets its own sub-pool below.
   */
  if (conn_pool != NULL) {
    destroy_pool(conn_pool);
    conn_cache = NULL;
  }

  conn_pool = make_sub_pool(session.pool);
  pr_pool_tag(conn_pool, "MySQL connection pool");

  if (conn_cache == NULL) {
    conn_cache = make_array(conn_pool, DEF_CONN_POOL_SIZE,
      sizeof(conn_entry_t *));
  }

  return 0;
}

static conftable sql_mysql_conftab[] = {
  { "SQLAuthTypes",	set_sqlauthtypes,	NULL },

  { NULL, NULL, NULL }
};

/* sql_mysql_module: The standard module struct for all ProFTPD modules.
 * We use the pre-fork handler to initialize the conn_cache array header.
 * Other backend modules may not need any init functions, or may need
 * to extend the init functions to initialize other internal variables.
 */
module sql_mysql_module = {
  /* Always NULL */
  NULL, NULL,

  /* Module API version */
  0x20,

  /* Module name */
  "sql_mysql",

  /* Module configuration directive handlers */
  sql_mysql_conftab,

  /* Module command handlers */
  NULL,

  /* Module authentication handlers */
  NULL,

  /* Module initialization */
  sql_mysql_init,

  /* Session initialization */
  sql_mysql_sess_init,

  /* Module version */
  MOD_SQL_MYSQL_VERSION
};
maytechnet/proftpd
contrib/mod_sql_mysql.c
C
gpl-2.0
65,446
/* Copyright (C) 2006 Christian Schneider * * This file is part of Nomad. * * Nomad is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Nomad is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Nomad; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* * Created on May 9, 2006 */ package net.sf.nmedit.nmutils.collections2; import java.util.NoSuchElementException; /** * A lightweight stack implementation. * * The basic stack operations {@link #add(T)}, * {@link #getTop()}, {@link #removeTop()}, {@link #isEmpty()}, * {@link #clear()} are performed in constant time <code>O(1)</code>. * * @author Christian Schneider */ public class List<T> { // the linked list private ListEntry<T> list = null; // predecessor of current selection private ListEntry<T> predecessor = null; // current selection private ListEntry<T> selection = null; /** * Returns the internal list. * @return the internal list */ public ListEntry<T> getInternalList() { return list; } /** * Returns the size of the list. * The operation requires time <code>O(n)</code> * @return the size of the list. */ public int size() { if (list == null) return 0; ListEntry pos = list; int size = 0; while (pos!=null) { size++; pos=pos.remaining; } return size; } /** * Adds the specified item to the front of the list. * The operation is performed in constant time <code>O(1)</code>. * @param item */ public void add(T item) { list = new ListEntry<T>(item, list); } /** * Returns true when the list contains the specified item. 
* The operation is performed in time <code>O(n)</code>. * Repeated calls with the same argument are performed * in constant time <code>O(1)</code> * * @param item the item * @return true when the list contains the specified item */ public boolean contains(T item) { return select(item); } /** * Returns the top element of the list. * @return the top element of the list */ public T getTop() { if (isEmpty()) throw new NoSuchElementException(); return list.item; } /** * Removes the top element of the list. * * @return the top element of the list. * @throws NoSuchElementException if the list was empty */ public T removeTop() { if (isEmpty()) throw new NoSuchElementException(); if (list==selection||list==predecessor) deselect(); ListEntry<T> entry = list; list = list.remaining; entry.remaining = null; return entry.item; } /** * Removes the specified element from the list. * The operation is performed in time <code>O(n)</code>. * If the {@link #contains(T)} opertion was called before * with the same argument, and the specified item is in the list, * the operation is performed in constant time <code>O(1)</code>. * * @param item * @return <code>true</code> when the element was found and removed. */ public boolean remove(T item) { if (select(item)) { if (list == selection) { list = selection.remaining; } else { predecessor.remaining = selection.remaining; } selection.remaining = null; deselect(); return true; } else return false; } /** * Returns the predecessor of the {@link ListEntry} that was * selected by {@link #select(T)}. * @return the predecessor of the current selection */ protected ListEntry<T> getPredecessor() { return predecessor; } /** * Returns the {@link ListEntry} that was * selected by {@link #select(T)}. * @return the current selection */ protected ListEntry<T> getSelection() { return selection; } /** * Selects the specified item. 
* * @param item * @return returns true, when the specified item is in the list */ protected boolean select(T item) { if (list==null) { deselect(); return false; // nothing to select } else if (selection!=null && selection.item==item) { return true; // already selected } else if (list.item==item) { predecessor = null; selection = list; // select return true; } else { // search in list predecessor = list; selection = list.remaining; while (selection!=null) { if (selection.item==item) { // found return true; } predecessor = selection; selection = selection.remaining; } // item not in list predecessor = null; return false; } } /** * deselects the current selection */ protected void deselect() { predecessor = null; selection = null; } /** * Returns <code>true</code> when the list is empty * @return <code>true</code> when the list is empty */ public boolean isEmpty() { return list==null; } /** * Removes each element from the list. */ public void clear() { list = null; } }
wesen/nmedit
libs/nmutils/src/net/sf/nmedit/nmutils/collections2/List.java
Java
gpl-2.0
6,356
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!-- NewPage --> <html lang="en"> <head> <!-- Generated by javadoc (version 1.7.0_17) on Mon Dec 02 20:33:04 CET 2013 --> <title>Uses of Class org.lwjgl.opengl.ARBVertexArrayBgra (LWJGL API)</title> <meta name="date" content="2013-12-02"> <link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style"> </head> <body> <script type="text/javascript"><!-- if (location.href.indexOf('is-external=true') == -1) { parent.document.title="Uses of Class org.lwjgl.opengl.ARBVertexArrayBgra (LWJGL API)"; } //--> </script> <noscript> <div>JavaScript is disabled on your browser.</div> </noscript> <!-- ========= START OF TOP NAVBAR ======= --> <div class="topNav"><a name="navbar_top"> <!-- --> </a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../overview-summary.html">Overview</a></li> <li><a href="../package-summary.html">Package</a></li> <li><a href="../../../../org/lwjgl/opengl/ARBVertexArrayBgra.html" title="class in org.lwjgl.opengl">Class</a></li> <li class="navBarCell1Rev">Use</li> <li><a href="../../../../overview-tree.html">Tree</a></li> <li><a href="../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../index-all.html">Index</a></li> <li><a href="../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a href="../../../../index.html?org/lwjgl/opengl/class-use/ARBVertexArrayBgra.html" target="_top">Frames</a></li> <li><a href="ARBVertexArrayBgra.html" target="_top">No Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_top"> <li><a href="../../../../allclasses-noframe.html">All Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = 
document.getElementById("allclasses_navbar_top"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip-navbar_top"> <!-- --> </a></div> <!-- ========= END OF TOP NAVBAR ========= --> <div class="header"> <h2 title="Uses of Class org.lwjgl.opengl.ARBVertexArrayBgra" class="title">Uses of Class<br>org.lwjgl.opengl.ARBVertexArrayBgra</h2> </div> <div class="classUseContainer">No usage of org.lwjgl.opengl.ARBVertexArrayBgra</div> <!-- ======= START OF BOTTOM NAVBAR ====== --> <div class="bottomNav"><a name="navbar_bottom"> <!-- --> </a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow"> <!-- --> </a> <ul class="navList" title="Navigation"> <li><a href="../../../../overview-summary.html">Overview</a></li> <li><a href="../package-summary.html">Package</a></li> <li><a href="../../../../org/lwjgl/opengl/ARBVertexArrayBgra.html" title="class in org.lwjgl.opengl">Class</a></li> <li class="navBarCell1Rev">Use</li> <li><a href="../../../../overview-tree.html">Tree</a></li> <li><a href="../../../../deprecated-list.html">Deprecated</a></li> <li><a href="../../../../index-all.html">Index</a></li> <li><a href="../../../../help-doc.html">Help</a></li> </ul> </div> <div class="subNav"> <ul class="navList"> <li>Prev</li> <li>Next</li> </ul> <ul class="navList"> <li><a href="../../../../index.html?org/lwjgl/opengl/class-use/ARBVertexArrayBgra.html" target="_top">Frames</a></li> <li><a href="ARBVertexArrayBgra.html" target="_top">No Frames</a></li> </ul> <ul class="navList" id="allclasses_navbar_bottom"> <li><a href="../../../../allclasses-noframe.html">All Classes</a></li> </ul> <div> <script type="text/javascript"><!-- allClassesLink = document.getElementById("allclasses_navbar_bottom"); if(window==top) { allClassesLink.style.display = "block"; } else { allClassesLink.style.display = "none"; } //--> </script> </div> <a name="skip-navbar_bottom"> 
<!-- --> </a></div> <!-- ======== END OF BOTTOM NAVBAR ======= --> <p class="legalCopy"><small><i>Copyright &#169; 2002-2009 lwjgl.org. All Rights Reserved.</i></small></p> </body> </html>
filthy-mattress/JPong
lwjgl-2.9.1/javadoc/org/lwjgl/opengl/class-use/ARBVertexArrayBgra.html
HTML
gpl-2.0
4,331
cmd_/home/shizhai/new_zhongwang/trunk/build_dir/toolchain-mips_r2_gcc-4.6-linaro_uClibc-0.9.33.2/linux-dev//include/linux/wimax/.install := /bin/bash scripts/headers_install.sh /home/shizhai/new_zhongwang/trunk/build_dir/toolchain-mips_r2_gcc-4.6-linaro_uClibc-0.9.33.2/linux-dev//include/linux/wimax /home/shizhai/new_zhongwang/trunk/build_dir/toolchain-mips_r2_gcc-4.6-linaro_uClibc-0.9.33.2/linux-3.10.4/include/uapi/linux/wimax/i2400m.h ; for F in ; do echo "\#include <asm-generic/$$F>" > /home/shizhai/new_zhongwang/trunk/build_dir/toolchain-mips_r2_gcc-4.6-linaro_uClibc-0.9.33.2/linux-dev//include/linux/wimax/$$F; done; touch /home/shizhai/new_zhongwang/trunk/build_dir/toolchain-mips_r2_gcc-4.6-linaro_uClibc-0.9.33.2/linux-dev//include/linux/wimax/.install
shizhai/wprobe
staging_dir/toolchain-mips_r2_gcc-4.6-linaro_uClibc-0.9.33.2/include/linux/wimax/..install.cmd
Batchfile
gpl-2.0
770
{-# LANGUAGE TemplateHaskell, DeriveDataTypeable #-} module Graph.MST.Config where import Autolib.ToDoc import Autolib.Reader import Data.Typeable data Config = Config { nodes :: Int , edges :: Int , weight_bounds :: (Int,Int) } deriving ( Typeable ) $(derives [makeReader, makeToDoc] [''Config]) rc :: Config rc = Config { nodes = 15 , edges = 30 , weight_bounds = ( 1, 100 ) }
Erdwolf/autotool-bonn
src/Graph/MST/Config.hs
Haskell
gpl-2.0
450
/***************************************************************************
                          qgscollapsiblegroupbox.cpp
                             -------------------
    begin                : August 2012
    copyright            : (C) 2012 by Etienne Tourigny
    email                : etourigny dot dev at gmail dot com
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/

#include "qgscollapsiblegroupbox.h"
#include "qgsapplication.h"
#include "qgslogger.h"

#include <QToolButton>
#include <QMouseEvent>
#include <QPushButton>
#include <QStyleOptionGroupBox>
#include <QSettings>
#include <QScrollArea>

// Shared (static) icons, lazily loaded on first init() call.
QIcon QgsCollapsibleGroupBoxBasic::mCollapseIcon;
QIcon QgsCollapsibleGroupBoxBasic::mExpandIcon;

QgsCollapsibleGroupBoxBasic::QgsCollapsibleGroupBoxBasic( QWidget *parent )
    : QGroupBox( parent )
{
  init();
}

QgsCollapsibleGroupBoxBasic::QgsCollapsibleGroupBoxBasic( const QString &title,
    QWidget *parent )
    : QGroupBox( title, parent )
{
  init();
}

QgsCollapsibleGroupBoxBasic::~QgsCollapsibleGroupBoxBasic()
{
  //QgsDebugMsg( "Entered" );
}

// Common constructor body: reset state, load icons, create the collapse
// button and wire up its signals.
void QgsCollapsibleGroupBoxBasic::init()
{
  //QgsDebugMsg( "Entered" );
  // variables
  mCollapsed = false;
  mInitFlat = false;
  mInitFlatChecked = false;
  mScrollOnExpand = true;
  mShown = false;
  mParentScrollArea = 0;
  mSyncParent = 0;
  mSyncGroup = "";
  mAltDown = false;
  mShiftDown = false;
  mTitleClicked = false;

  // init icons (once per process; the members are static)
  if ( mCollapseIcon.isNull() )
  {
    mCollapseIcon = QgsApplication::getThemeIcon( "/mIconCollapse.png" );
    mExpandIcon = QgsApplication::getThemeIcon( "/mIconExpand.png" );
  }

  // collapse button
  mCollapseButton = new QgsGroupBoxCollapseButton( this );
  mCollapseButton->setObjectName( "collapseButton" );
  mCollapseButton->setAutoRaise( true );
  mCollapseButton->setFixedSize( 16, 16 );
  // TODO set size (as well as margins) depending on theme, in updateStyle()
  mCollapseButton->setIconSize( QSize( 12, 12 ) );
  mCollapseButton->setIcon( mCollapseIcon );

  connect( mCollapseButton, SIGNAL( clicked() ), this, SLOT( toggleCollapsed() ) );
  connect( this, SIGNAL( toggled( bool ) ), this, SLOT( checkToggled( bool ) ) );
  connect( this, SIGNAL( clicked( bool ) ), this, SLOT( checkClicked( bool ) ) );
}

// Performs one-time initialisation (flat state, parent scroll area lookup,
// style update, initial collapse) the first time the widget becomes visible.
void QgsCollapsibleGroupBoxBasic::showEvent( QShowEvent * event )
{
  //QgsDebugMsg( "Entered" );
  // initialise widget on first show event only
  if ( mShown )
  {
    event->accept();
    return;
  }

  // check if groupbox was set to flat in Designer or in code
  if ( !mInitFlatChecked )
  {
    mInitFlat = isFlat();
    mInitFlatChecked = true;
  }

  // find parent QScrollArea - this might not work in complex layouts - should we look deeper?
  if ( parent() && parent()->parent() )
    mParentScrollArea = dynamic_cast<QScrollArea*>( parent()->parent()->parent() );
  else
    mParentScrollArea = 0;

  if ( mParentScrollArea )
  {
    QgsDebugMsg( "found a QScrollArea parent: " + mParentScrollArea->objectName() );
  }
  else
  {
    QgsDebugMsg( "did not find a QScrollArea parent" );
  }

  updateStyle();

  // expand if needed - any calls to setCollapsed() before only set mCollapsed, but have UI effect
  if ( mCollapsed )
  {
    setCollapsed( mCollapsed );
  }
  else
  {
    // emit signal for connections using collapsed state
    emit collapsedStateChanged( isCollapsed() );
  }

  // verify triangle mirrors groupbox's enabled state
  mCollapseButton->setEnabled( isEnabled() );

  // set mShown after first setCollapsed call or expanded groupboxes
  // will scroll scroll areas when first shown
  mShown = true;
  event->accept();
}

void QgsCollapsibleGroupBoxBasic::mousePressEvent( QMouseEvent *event )
{
  // avoid leaving checkbox in pressed state if alt- or shift-clicking
  if ( event->modifiers() & ( Qt::AltModifier | Qt::ControlModifier | Qt::ShiftModifier )
       && titleRect().contains( event->pos() )
       && isCheckable() )
  {
    event->ignore();
    return;
  }

  // default behaviour - pass to QGroupBox
  QGroupBox::mousePressEvent( event );
}

void QgsCollapsibleGroupBoxBasic::mouseReleaseEvent( QMouseEvent *event )
{
  // record the modifier/title state for toggleCollapsed() to inspect
  mAltDown = ( event->modifiers() & ( Qt::AltModifier | Qt::ControlModifier ) );
  mShiftDown = ( event->modifiers() & Qt::ShiftModifier );
  mTitleClicked = ( titleRect().contains( event->pos() ) );

  // sync group when title is alt-clicked
  // collapse/expand when title is clicked and non-checkable
  // expand current and collapse others on shift-click
  if ( event->button() == Qt::LeftButton && mTitleClicked &&
       ( mAltDown || mShiftDown || !isCheckable() ) )
  {
    toggleCollapsed();
    return;
  }

  // default behaviour - pass to QGroupBox
  QGroupBox::mouseReleaseEvent( event );
}

void QgsCollapsibleGroupBoxBasic::changeEvent( QEvent *event )
{
  // always re-enable mCollapseButton when groupbox was previously disabled
  // e.g. resulting from a disabled parent of groupbox, or a signal/slot connection

  // default behaviour - pass to QGroupBox
  QGroupBox::changeEvent( event );

  if ( event->type() == QEvent::EnabledChange && isEnabled() )
    mCollapseButton->setEnabled( true );
}

// Assigns this group box to a named sync group and updates the collapse
// button tooltip to advertise the group-wide shortcuts.
void QgsCollapsibleGroupBoxBasic::setSyncGroup( QString grp )
{
  mSyncGroup = grp;
  QString tipTxt = QString( "" );
  if ( !grp.isEmpty() )
  {
    tipTxt = tr( "Ctrl(or Alt)-click to toggle all" ) + "\n" + tr( "Shift-click to expand, then collapse others" );
  }
  mCollapseButton->setToolTip( tipTxt );
}

// Returns the rectangle of the group box title, as computed by the current
// style; used for hit-testing title clicks.
QRect QgsCollapsibleGroupBoxBasic::titleRect() const
{
  QStyleOptionGroupBox box;
  initStyleOption( &box );
  return style()->subControlRect( QStyle::CC_GroupBox, &box,
                                  QStyle::SC_GroupBoxLabel, this );
}

// Clears the recorded Alt/Shift modifier state on both this widget and the
// collapse button.
void QgsCollapsibleGroupBoxBasic::clearModifiers()
{
  mCollapseButton->setAltDown( false );
  mCollapseButton->setShiftDown( false );
  mAltDown = false;
  mShiftDown = false;
}

void QgsCollapsibleGroupBoxBasic::checkToggled( bool chkd )
{
  Q_UNUSED( chkd );
  mCollapseButton->setEnabled( true ); // always keep enabled
}

void QgsCollapsibleGroupBoxBasic::checkClicked( bool chkd )
{
  // expand/collapse when checkbox toggled by user click.
  // don't do this on toggle signal, otherwise group boxes will default to collapsed
  // in option dialog constructors, reducing discovery of options by new users and
  // overriding user's auto-saved collapsed/expanded state for the group box
  if ( chkd && isCollapsed() )
    setCollapsed( false );
  else if ( ! chkd && ! isCollapsed() )
    setCollapsed( true );
}

// Toggles this group box, and — when Alt/Ctrl or Shift is held — all other
// enabled group boxes sharing the same sync group under the common parent.
void QgsCollapsibleGroupBoxBasic::toggleCollapsed()
{
  // verify if sender is this group box's collapse button
  bool senderCollBtn = false;
  QgsGroupBoxCollapseButton* collBtn = qobject_cast<QgsGroupBoxCollapseButton*>( QObject::sender() );
  senderCollBtn = ( collBtn && collBtn == mCollapseButton );

  mAltDown = ( mAltDown || mCollapseButton->altDown() );
  mShiftDown = ( mShiftDown || mCollapseButton->shiftDown() );

  // find any sync group siblings and toggle them
  if (( senderCollBtn || mTitleClicked )
      && ( mAltDown || mShiftDown )
      && !mSyncGroup.isEmpty() )
  {
    QgsDebugMsg( "Alt or Shift key down, syncing group" );
    // get pointer to parent or grandparent widget
    if ( parentWidget() )
    {
      mSyncParent = parentWidget();
      if ( mSyncParent->parentWidget() )
      {
        // don't use whole app for grandparent (common for dialogs that use main window for parent)
        if ( mSyncParent->parentWidget()->objectName() != QString( "QgisApp" ) )
        {
          mSyncParent = mSyncParent->parentWidget();
        }
      }
    }
    else
    {
      mSyncParent = 0;
    }

    if ( mSyncParent )
    {
      QgsDebugMsg( "found sync parent: " + mSyncParent->objectName() );

      bool thisCollapsed = mCollapsed; // get state of current box before its changed
      foreach ( QgsCollapsibleGroupBoxBasic *grpbox, mSyncParent->findChildren<QgsCollapsibleGroupBoxBasic*>() )
      {
        if ( grpbox->syncGroup() == syncGroup() && grpbox->isEnabled() )
        {
          if ( mShiftDown && grpbox == dynamic_cast<QgsCollapsibleGroupBoxBasic *>( this ) )
          {
            // expand current group box on shift-click
            setCollapsed( false );
          }
          else
          {
            grpbox->setCollapsed(
mShiftDown ? true : !thisCollapsed ); } } } clearModifiers(); return; } else { QgsDebugMsg( "did not find a sync parent" ); } } // expand current group box on shift-click, even if no sync group if ( mShiftDown ) { setCollapsed( false ); } else { setCollapsed( !mCollapsed ); } clearModifiers(); } void QgsCollapsibleGroupBoxBasic::updateStyle() { setUpdatesEnabled( false ); QSettings settings; // NOTE: QGIS-Style groupbox styled in app stylesheet bool usingQgsStyle = settings.value( "qgis/stylesheet/groupBoxCustom", QVariant( false ) ).toBool(); QStyleOptionGroupBox box; initStyleOption( &box ); QRect rectFrame = style()->subControlRect( QStyle::CC_GroupBox, &box, QStyle::SC_GroupBoxFrame, this ); QRect rectTitle = titleRect(); // margin/offset defaults int marginLeft = 20; // title margin for disclosure triangle int marginRight = 5; // a little bit of space on the right, to match space on the left int offsetLeft = 0; // offset for oxygen theme int offsetStyle = QApplication::style()->objectName().contains( "macintosh" ) ? ( usingQgsStyle ? 1 : 8 ) : 0; int topBuffer = ( usingQgsStyle ? 
3 : 1 ) + offsetStyle; // space between top of title or triangle and widget above int offsetTop = topBuffer; int offsetTopTri = topBuffer; // offset for triangle if ( mCollapseButton->height() < rectTitle.height() ) // triangle's height > title text's, offset triangle { offsetTopTri += ( rectTitle.height() - mCollapseButton->height() ) / 2 ; // offsetTopTri += rectTitle.top(); } else if ( rectTitle.height() < mCollapseButton->height() ) // title text's height < triangle's, offset title { offsetTop += ( mCollapseButton->height() - rectTitle.height() ) / 2; } // calculate offset if frame overlaps triangle (oxygen theme) // using an offset of 6 pixels from frame border if ( QApplication::style()->objectName().toLower() == "oxygen" ) { QStyleOptionGroupBox box; initStyleOption( &box ); QRect rectFrame = style()->subControlRect( QStyle::CC_GroupBox, &box, QStyle::SC_GroupBoxFrame, this ); QRect rectCheckBox = style()->subControlRect( QStyle::CC_GroupBox, &box, QStyle::SC_GroupBoxCheckBox, this ); if ( rectFrame.left() <= 0 ) offsetLeft = 6 + rectFrame.left(); if ( rectFrame.top() <= 0 ) { if ( isCheckable() ) { // if is checkable align with checkbox offsetTop = ( rectCheckBox.height() / 2 ) - ( mCollapseButton->height() / 2 ) + rectCheckBox.top(); offsetTopTri = offsetTop + 1; } else { offsetTop = 6 + rectFrame.top(); offsetTopTri = offsetTop; } } } QgsDebugMsg( QString( "groupbox: %1 style: %2 offset: left=%3 top=%4 top2=%5" ).arg( objectName() ).arg( QApplication::style()->objectName() ).arg( offsetLeft ).arg( offsetTop ).arg( offsetTopTri ) ); // customize style sheet for collapse/expand button and force left-aligned title QString ss; if ( usingQgsStyle || QApplication::style()->objectName().contains( "macintosh" ) ) { ss += "QgsCollapsibleGroupBoxBasic, QgsCollapsibleGroupBox {"; ss += QString( " margin-top: %1px;" ).arg( topBuffer + ( usingQgsStyle ? 
rectTitle.height() + 5 : rectFrame.top() ) ); ss += "}"; } ss += "QgsCollapsibleGroupBoxBasic::title, QgsCollapsibleGroupBox::title {"; ss += " subcontrol-origin: margin;"; ss += " subcontrol-position: top left;"; ss += QString( " margin-left: %1px;" ).arg( marginLeft ); ss += QString( " margin-right: %1px;" ).arg( marginRight ); ss += QString( " left: %1px;" ).arg( offsetLeft ); ss += QString( " top: %1px;" ).arg( offsetTop ); if ( QApplication::style()->objectName().contains( "macintosh" ) ) { ss += " background-color: rgba(0,0,0,0)"; } ss += "}"; setStyleSheet( ss ); // clear toolbutton default background and border and apply offset QString ssd; ssd = QString( "QgsCollapsibleGroupBoxBasic > QToolButton#%1, QgsCollapsibleGroupBox > QToolButton#%1 {" ).arg( mCollapseButton->objectName() ); ssd += " background-color: rgba(255, 255, 255, 0); border: none;"; ssd += "}"; mCollapseButton->setStyleSheet( ssd ); if ( offsetLeft != 0 || offsetTopTri != 0 ) mCollapseButton->move( offsetLeft, offsetTopTri ); setUpdatesEnabled( true ); } void QgsCollapsibleGroupBoxBasic::setCollapsed( bool collapse ) { mCollapsed = collapse; if ( !isVisible() ) return; // for consistent look/spacing across platforms when collapsed if ( ! mInitFlat ) // skip if initially set to flat in Designer setFlat( collapse ); // avoid flicker in X11 // NOTE: this causes app to crash when loading a project that hits a group box with // 'collapse' set via dynamic property or in code (especially if auto-launching project) // TODO: find another means of avoiding the X11 flicker // QApplication::processEvents(); // handle visual fixes for collapsing/expanding collapseExpandFixes(); // set maximum height to hide contents - does this work in all envs? // setMaximumHeight( collapse ? 25 : 16777215 ); setMaximumHeight( collapse ? titleRect().bottom() + 6 : 16777215 ); mCollapseButton->setIcon( collapse ? 
mExpandIcon : mCollapseIcon ); // if expanding and is in a QScrollArea, scroll down to make entire widget visible if ( mShown && mScrollOnExpand && !collapse && mParentScrollArea ) { // process events so entire widget is shown QApplication::processEvents(); mParentScrollArea->ensureWidgetVisible( this ); } // emit signal for connections using collapsed state emit collapsedStateChanged( isCollapsed() ); } void QgsCollapsibleGroupBoxBasic::collapseExpandFixes() { if ( QApplication::style()->objectName().contains( "macintosh" ) ) { // handle QPushButtons in form layouts that stay partly visible on collapse (Qt bug?) // hide on collapse for fix, but only show buttons that were specifically hidden when expanding // key hiding off of this group box's object name so it does not affect child group boxes const QByteArray objKey = QString( "CollGrpBxHiddenButton_%1" ).arg( objectName() ).toUtf8(); const char* pbHideKey = objKey.constData(); // handle child group box widgets that don't hide their frames on collapse of parent const char* gbHideKey = "CollGrpBxHideGrpBx"; if ( mCollapsed ) { // first hide all child group boxes, regardless of whether they are collapsible foreach ( QGroupBox* gbx, findChildren<QGroupBox *>() ) { if ( gbx->isVisible() && !gbx->property( gbHideKey ).isValid() ) { gbx->setProperty( gbHideKey, QVariant( true ) ); gbx->hide(); } } // hide still visible push buttons belonging to this group box foreach ( QPushButton* pBtn, findChildren<QPushButton *>() ) { if ( pBtn->isVisible() && !pBtn->property( pbHideKey ).isValid() ) { pBtn->setProperty( pbHideKey, QVariant( true ) ); pBtn->hide(); } } } else // on expand { // first show push buttons belonging to this group box foreach ( QPushButton* pBtn, findChildren<QPushButton *>() ) { if ( pBtn->property( pbHideKey ).isValid() ) // don't have to check bool value { pBtn->setProperty( pbHideKey, QVariant() ); // remove property pBtn->show(); } } // show all hidden child group boxes foreach ( QGroupBox* gbx, 
findChildren<QGroupBox *>() ) { if ( gbx->property( gbHideKey ).isValid() ) // don't have to check bool value { gbx->setProperty( gbHideKey, QVariant() ); // remove property gbx->show(); } } } } } // ---- QgsCollapsibleGroupBox::QgsCollapsibleGroupBox( QWidget *parent, QSettings* settings ) : QgsCollapsibleGroupBoxBasic( parent ), mSettings( settings ) { init(); } QgsCollapsibleGroupBox::QgsCollapsibleGroupBox( const QString &title, QWidget *parent, QSettings* settings ) : QgsCollapsibleGroupBoxBasic( title, parent ), mSettings( settings ) { init(); } QgsCollapsibleGroupBox::~QgsCollapsibleGroupBox() { //QgsDebugMsg( "Entered" ); saveState(); if ( mDelSettings ) // local settings obj to delete delete mSettings; mSettings = 0; // null the pointer (in case of outside settings obj) } void QgsCollapsibleGroupBox::setSettings( QSettings* settings ) { if ( mDelSettings ) // local settings obj to delete delete mSettings; mSettings = settings; mDelSettings = false; // don't delete outside obj } void QgsCollapsibleGroupBox::init() { //QgsDebugMsg( "Entered" ); // use pointer to app qsettings if no custom qsettings specified // custom qsettings object may be from Python plugin mDelSettings = false; if ( !mSettings ) { mSettings = new QSettings(); mDelSettings = true; // only delete obj created by class } // variables mSaveCollapsedState = true; // NOTE: only turn on mSaveCheckedState for groupboxes NOT used // in multiple places or used as options for different parent objects mSaveCheckedState = false; mSettingGroup = ""; // if not set, use window object name } void QgsCollapsibleGroupBox::showEvent( QShowEvent * event ) { //QgsDebugMsg( "Entered" ); // initialise widget on first show event only if ( mShown ) { event->accept(); return; } // check if groupbox was set to flat in Designer or in code if ( !mInitFlatChecked ) { mInitFlat = isFlat(); mInitFlatChecked = true; } loadState(); QgsCollapsibleGroupBoxBasic::showEvent( event ); } QString QgsCollapsibleGroupBox::saveKey() 
const { // save key for load/save state // currently QgsCollapsibleGroupBox/window()/object QString saveKey = "/" + objectName(); // QObject* parentWidget = parent(); // while ( parentWidget != NULL ) // { // saveKey = "/" + parentWidget->objectName() + saveKey; // parentWidget = parentWidget->parent(); // } // if ( parent() != NULL ) // saveKey = "/" + parent()->objectName() + saveKey; QString setgrp = mSettingGroup.isEmpty() ? window()->objectName() : mSettingGroup; saveKey = "/" + setgrp + saveKey; saveKey = "QgsCollapsibleGroupBox" + saveKey; return saveKey; } void QgsCollapsibleGroupBox::loadState() { //QgsDebugMsg( "Entered" ); if ( !mSettings ) return; if ( !isEnabled() || ( !mSaveCollapsedState && !mSaveCheckedState ) ) return; setUpdatesEnabled( false ); QString key = saveKey(); QVariant val; if ( mSaveCheckedState ) { val = mSettings->value( key + "/checked" ); if ( ! val.isNull() ) setChecked( val.toBool() ); } if ( mSaveCollapsedState ) { val = mSettings->value( key + "/collapsed" ); if ( ! val.isNull() ) setCollapsed( val.toBool() ); } setUpdatesEnabled( true ); } void QgsCollapsibleGroupBox::saveState() { //QgsDebugMsg( "Entered" ); if ( !mSettings ) return; if ( !isEnabled() || ( !mSaveCollapsedState && !mSaveCheckedState ) ) return; QString key = saveKey(); if ( mSaveCheckedState ) mSettings->setValue( key + "/checked", isChecked() ); if ( mSaveCollapsedState ) mSettings->setValue( key + "/collapsed", isCollapsed() ); }
innotechsoftware/Quantum-GIS
src/gui/qgscollapsiblegroupbox.cpp
C++
gpl-2.0
20,418
/* * Copyright (c) 2010 José Luis Vergara <pentalis@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "kis_waterymix_options.h" #include "ui_wdgwaterymixoptions.h" class KisWateryMixOptionsWidget: public QWidget, public Ui::WdgWateryMixOptions { public: KisWateryMixOptionsWidget(QWidget *parent = 0) : QWidget(parent) { setupUi(this); } }; KisWateryMixOptions::KisWateryMixOptions() : KisPaintOpOption(i18n("WateryMix options"), KisPaintOpOption::brushCategory(), false) { m_checkable = false; m_options = new KisWateryMixOptionsWidget(); // connect(m_options->separationIntervalSpinBox, SIGNAL(valueChanged(int)),SIGNAL(sigSettingChanged())); setConfigurationPage(m_options); } KisWateryMixOptions::~KisWateryMixOptions() { } void KisWateryMixOptions::writeOptionSetting(KisPropertiesConfiguration* setting) const { // setting->setProperty("WateryMix/separationintervals", m_options->separationIntervalSpinBox->value() ); } void KisWateryMixOptions::readOptionSetting(const KisPropertiesConfiguration* setting) { // m_options->separationIntervalSpinBox->setValue( setting->getInt("WateryMix/separationintervals") ); } ;
wyuka/calligra
krita/plugins/paintops/waterymixbrush/kis_waterymix_options.cpp
C++
gpl-2.0
1,901
{% extends "blogengine/includes/base.html" %} {% load custom_markdown %} {% block content %} {% if object_list %} {% for post in object_list %} <div class="post col-md-12"> <h1><a href="{{ post.get_absolute_url }}">{{ post.title }}</a></h1> <h3>{{ post.pub_date }}</h3> {{ post.text|custom_markdown }} </div> {% if post.category %} <div class="col-md-12"> <a href="{{ post.category.get_absolute_url }}"><span class="label label-primary">{{ post.category.name }}</span></a> </div> {% endif %} {% if post.tags %} <div class="col-md-12"> {% for tag in post.tags.all %} <a href="{{ tag.get_absolute_url }}"><span class="label label-success">{{ tag.name }}</span></a> {% endfor %} </div> {% endif %} {% endfor %} {% else %} <p>No posts found</p> {% endif %} <ul class="pager"> {% if page_obj.has_previous %} <li class="previous"><a href="/{{ page_obj.previous_page_number }}/">Previous Page</a></li> {% endif %} {% if page_obj.has_next %} <li class="next"><a href="/{{ page_obj.next_page_number }}/">Next Page</a></li> {% endif %} </ul> <a href="/feeds/posts/tag/{{ tag.slug }}/">RSS feed for tag {{ tag.name }}</a> {% endblock %}
matthewbdaly/django_tutorial_blog_ng
blogengine/templates/blogengine/tag_post_list.html
HTML
gpl-2.0
1,479
<?php /** * @package AdminTools * @copyright Copyright (c)2010-2014 Nicholas K. Dionysopoulos * @license GNU General Public License version 3, or later * @version $Id$ */ // Protect from unauthorized access defined('_JEXEC') or die; class AdmintoolsViewAdminuser extends F0FViewHtml { private function randomFalseUsername() { $usernames = array( '42', 'clinteastwood', 'chucknorris', 'rantanplan', 'pinky', 'brain', 'beavis', 'tux', 'larry', 'stevenseagal', 'jeanclaudevandamme', 'jackiechan' ); $id = 42; $dontadd = JFactory::getUser($id)->username; $ret = $dontadd; while ($ret == $dontadd) { $rand = rand(0, count($usernames) - 1); $ret = $usernames[$rand]; } return $ret; } protected function onBrowse($tpl = null) { $model = $this->getModel(); $this->hasDefaultAdmin = $model->hasDefaultAdmin(); $this->getDefaultUsername = $model->getDefaultUsername(); $this->fakeUsername = $this->randomFalseUsername(); if (version_compare(JVERSION, '3.0', 'ge')) { JHTML::_('behavior.framework'); } else { JHTML::_('behavior.mootools'); } } }
cuongnd/test_pro
administrator/components/com_admintools/views/adminuser/view.html.php
PHP
gpl-2.0
1,119
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <!-- <meta content="IE=edge" http-equiv="X-UA-Compatible"> --> <meta content="width=device-width, initial-scale=1" name="viewport"> <meta content="" name="description"> <meta content="" name="author"> <link href="ico/favicon.ico" rel="shortcut icon"> <title>Cannavaro Web Admin</title> <!-- Bootstrap core CSS --> <link rel="stylesheet" href="css/bootstrap.css"> <!-- Bootstrap theme --> <!-- <link rel="stylesheet" href="css/bootstrap-theme.min.css"> --> <!-- Custom styles for this template --> <link rel="stylesheet" href="css/theme.css"> <link rel="stylesheet" href="css/style.css"> <link rel="stylesheet" href="css/dripicon.css"> <link rel="stylesheet" href="css/typicons.css" /> <link rel="stylesheet" href="css/font-awesome.css" /> <link rel="stylesheet" href="css/responsive.css"> <link rel="stylesheet" href="js/tip/tooltipster.css"> <link rel="stylesheet" type="text/css" href="js/vegas/jquery.vegas.css" /> <link href="js/colorPicker/bootstrap-colorpicker.css" rel="stylesheet"> <link href="js/validate/validate.css" rel="stylesheet"> <link href="js/idealform/css/jquery.idealforms.css" rel="stylesheet"> <link rel="stylesheet" href="js/timepicker/bootstrap-timepicker.css"> <link rel="stylesheet" href="js/datepicker/datepicker.css"> <link rel="stylesheet" href="js/datepicker/clockface.css"> <!-- pace loader --> <script src="js/pace/pace.js"></script> <link href="js/pace/themes/orange/pace-theme-flash.css" rel="stylesheet" /> </head> <body role="document"> <div id="preloader"> <div id="status">&nbsp;</div> </div> <!-- TOPNAV --> <div class="row"> <div class="col-lg-3"> <ul class="nav navbar-nav navbar-left list-unstyled list-inline text-amber date-list"> <li><i class="fontello-th text-amber"></i> </li> <li id="Date"></li> </ul> <ul class="nav navbar-nav navbar-left list-unstyled list-inline text-amber date-list"> <li><i class="fontello-stopwatch text-amber"></i> </li> <li id="hours"></li> <li 
class="point">:</li> <li id="min"></li> <li class="point">:</li> <li id="sec"></li> </ul> </div> <div class="col-lg-6"> <div style="margin-bottom:0;" class="alert text-white "> <button data-dismiss="alert" class="close" type="button">×</button> <span class="entypo-info-circled"></span> <strong>Wellcome back!</strong>&nbsp;&nbsp;Dave, your last loggin was 1 day ago. Have a nice day </div> </div> <div class="col-lg-3"> <ul class="nav navbar-nav navbar-right"> <li> <a data-toggle="dropdown" class="dropdown-toggle text-white" href="#"> <img alt="" class="admin-pic img-circle" src="http://api.randomuser.me/portraits/thumb/men/23.jpg">Hi, Dave Mattew <b class="caret"></b> </a> <ul style="margin:25px 15px 0 0;" role="menu" class="dropdown-setting dropdown-menu bg-amber"> <li> <a href="#"> <span class="entypo-user"></span>&nbsp;&nbsp;My Profile</a> </li> <li> <a href="#"> <span class="entypo-vcard"></span>&nbsp;&nbsp;Account Setting</a> </li> <li> <a href="#"> <span class="entypo-lifebuoy"></span>&nbsp;&nbsp;Help</a> </li> <li> <a href="http://themeforest.net/item/apricot-navigation-admin-dashboard-template/7664475?WT.ac=category_item&amp;WT.z_author=themesmile"> <span class="entypo-basket"></span>&nbsp;&nbsp; Purchase</a> </li> </ul> </li> <li> <a data-toggle="dropdown" class="dropdown-toggle text-white" href="#"> <i class="icon-gear"></i>&nbsp;Setting <b class="caret"></b> </a> <ul style="margin:25px 15px 0 0;" role="menu" class="theme-bg dropdown-setting dropdown-menu bg-amber"> <li> <div id="button-bg"></div> </li> <li> <div id="button-bg2"></div> </li> <li> <div id="button-bg3"></div> </li> <li> <div id="button-bg4"></div> </li> <li> <div id="button-bg5"></div> </li> <li> <div id="button-bg6"></div> </li> </ul> </li> </ul> </div> </div> <!-- END OF TOPNAV --> <!-- Comtainer --> <div class="container-fluid paper-wrap bevel tlbr"> <div id="paper-top"> <div class="row"> <div class="col-sm-3 no-pad"> <a class="navbar-brand logo-text" href="#">Cannavaro</a> <ul 
class="list-unstyled list-inline noft-btn"> <li data-toggle="tooltip" data-placement="bottom" title="Profile"><i class=" icon-user"></i> </li> <li data-toggle="tooltip" data-placement="bottom" title="Log Out"> <a href="login.html" class="text-white"><i class="icon-upload"></i></a> </li> </ul> </div> <div class="col-sm-6 no-pad"> <ul style="margin-top:8px;" class="nav navbar-nav navbar-left list-unstyled list-inline text-gray date-list news-list"> <!-- <li><i class="fontello-doc-text text-gray"></i> </li> --> <li> <ul class="list-unstyled top-newsticker text-gray news-list"> <li><i class="fontello-cloud-flash-inv text-gray"></i>&nbsp;&nbsp; <strong>Yogyakarta,</strong>Achmad Dhani Partly Cloudy Feels Like &nbsp;<b>22 °C</b> </li> <li><i class="fontello-cloud-sun-inv text-gray"></i>&nbsp;&nbsp; <strong>Bandung,</strong>Jln Sudirman, Sunny Feels Like &nbsp;<b>31 °C</b> </li> <li><i class="fontello-rain-inv text-gray"></i>&nbsp;&nbsp; <strong>Jakarta,</strong>Tomang, Rain Like &nbsp;<b>19 °C</b> </li> </ul> </li> </ul> </div> <div class="col-sm-3 no-pad"> <!-- menu right --> <div class="navbar-right"> <ul class="nav navbar-nav margin-left"> <!-- Messages: style can be found in dropdown.less--> <li class="dropdown messages-menu"> <div class="drop-btn dropdown-toggle bg-white" data-toggle="dropdown"> <i class="fa fa-envelope text-navy"></i> <span class="label label-success label-drop">4</span> </div> <ul class="dropdown-menu drop-msg "> <li class="header bg-green"> You have 4 messages</li> <li> <!-- inner menu: contains the actual data --> <ul class="menu bg-white"> <li> <!-- start message --> <a href="#"> <div class="pull-left"> <img src="http://api.randomuser.me/portraits/thumb/men/37.jpg" class="img-circle" alt="User Image" /> </div> <h4> Developer <!-- <small><i class="fa fa-clock-o"></i> 5 mins</small> --> </h4> <p>Bug fixed level 90%</p> </a> </li> <!-- end message --> <li> <a href="#"> <div class="pull-left"> <img 
src="http://api.randomuser.me/portraits/thumb/women/36.jpg" class="img-circle" alt="user image" /> </div> <h4> Aplication Support </h4> <p>There is some bug in your last submit</p> </a> </li> <li> <a href="#"> <div class="pull-left"> <img src="http://api.randomuser.me/portraits/thumb/men/35.jpg" class="img-circle" alt="user image" /> </div> <h4> Lead Developers </h4> <p>Please check again your submit</p> </a> </li> <li> <a href="#"> <div class="pull-left"> <img src="http://api.randomuser.me/portraits/thumb/women/34.jpg" class="img-circle" alt="user image" /> </div> <h4> Web Designer </h4> <p>Art has done</p> </a> </li> <li> <a href="#"> <div class="pull-left"> <img src="http://api.randomuser.me/portraits/thumb/men/33.jpg" class="img-circle" alt="user image" /> </div> <h4> General Manager </h4> <p>Employed newslatter</p> </a> </li> </ul> </li> <li class="footer-green"> <!-- <div class="btn btn-xs bg-opacity-white-btn fontello-arrows-cw"></div> <div class="btn btn-xs bg-opacity-white-btn fontello-trash"></div> <div class="btn btn-xs bg-opacity-white-btn fontello-eye-outline"></div> --> </li> </ul> </li> <!-- Notifications: style can be found in dropdown.less --> <li class="dropdown notifications-menu"> <div class="drop-btn dropdown-toggle bg-white" data-toggle="dropdown"> <i class="fa fa-exclamation-triangle text-navy"></i> <span class="label bg-aqua label-drop">7</span> </div> <ul class="dropdown-menu drop-noft"> <li class="header bg-aqua"> You have 10 notifications</li> <li> <!-- inner menu: contains the actual data --> <ul class="menu bg-white"> <li> <a href="#"> <i class="fa icon-user"></i> New developer registered </a> </li> <li> <a href="#"> <i class="fa icon-cloud"></i> 2 item commit </a> </li> <li> <a href="#"> <i class="fa icon-download"></i> 3 members joined </a> </li> <li> <a href="#"> <i class="fa icon-tag"></i> 22 sales made </a> </li> <li> <a href="#"> <i class="fa icon-document"></i> New task from manager </a> </li> </ul> </li> <li class="footer-blue"> 
</li> </ul> </li> <!-- Tasks: style can be found in dropdown.less --> <li class="dropdown tasks-menu"> <div class="drop-btn bg-white dropdown-toggle" data-toggle="dropdown"> <i class="fa fa-briefcase text-navy"></i> <span class="label bg-red label-drop">9</span> </div> <ul class="dropdown-menu drop-task"> <li class="header bg-red"> <span></span>You have 9 tasks</li> <li> <!-- inner menu: contains the actual data --> <ul class="menu bg-white"> <li> <!-- Task item --> <div class="task-list-item"> <h2>Wed, 25 Mar 2014 <span>9:32 <small>PM</small> </span> </h2> <h1>Finished task Testing.</h1> <p>Lorem ipsum dollor si amet amet jabang bayi</p> </div> </li> <!-- end task item --> <li> <!-- Task item --> <div class="task-list-item"> <h2>Thu, 23 Mar 2014 <span>7:54 <small>PM</small> </span> </h2> <h1>Creat the documentation</h1> <p>Lorem ipsum dollor si amet amet jabang bayi</p> </div> </li> <!-- end task item --> <li> <!-- Task item --> <div class="task-list-item"> <h2>Wed, 21 Mar 2014 <span>12:43 <small>PM</small> </span> </h2> <h1>Repository you file now!</h1> <p>Lorem ipsum dollor si amet amet jabang bayi</p> </div> </li> <!-- end task item --> <li> <!-- Task item --> <div class="task-list-item"> <h2>Fri, 20 Mar 2014 <span>8:00 <small>PM</small> </span> </h2> <h1>Fill the job description</h1> <p>Lorem ipsum dollor si amet amet jabang bayi</p> </div> </li> <!-- end task item --> </ul> </li> <li class="footer-red"> </li> </ul> </li> </ul> </div> </div> <!-- end of menu right --> </div> </div> <!-- SIDE MENU --> <div class="wrap-sidebar-content"> <div id="skin-select"> <a id="toggle"> <span class="fa icon-menu"></span> </a> <div class="skin-part"> <div id="tree-wrap"> <div class="side-bar"> <ul id="menu-showhide" class="topnav"> <li class="devider-title"> <h3> <span>Desain Kit Menu</span> </h3> </li> <li> <a class="tooltip-tip" href="index.html" title="Dashboard"> <i class="fontello-desktop-1"></i> <span>Dashboard</span> </a> </li> <li> <a class="tooltip-tip" href="#" 
title="Mail"> <i class=" fontello-mail-1"></i> <span>mail</span> </a> <ul> <!-- <li class="hide-min-toggle">UI Element</li> --> <li> <!-- class="active" --> <a href="mail.html" title="Index">Inbox</a> </li> <li> <!-- class="active" --> <a href="compose.html" title="Compose">Compose</a> </li> </ul> </li> <li> <a class="tooltip-tip" href="#" title="UI"> <i class="fontello-note"></i> <span>UI</span> </a> <ul> <!-- <li class="hide-min-toggle">UI Element</li> --> <li> <!-- class="active" --> <a href="element.html" title="Element">Element</a> </li> <li><a href="button.html" title="Button"> Button </a> </li> <li> <a href="wizard.html" title="Tab & Accordion">Wizard</a> </li> <li> <a href="calendar.html" title="Calender">Calendar</a> </li> <li> <a href="tree.html" title="Tree View">Tree View</a> </li> <li> <a href="grids.html" title="Grids">Grids</a> </li> <li> <a href="chart.html" title="Chart">Chart</a> </li> <li> <a href="typhography.html" title="Typhoghrapy"> Typhoghrapy </a> </li> </ul> </li> <li> <a class="tooltip-tip" href="#" title="Layout"> <i class=" fontello-pencil-1"></i> <span>Layout&nbsp; <small class="side-menu-noft bg-aqua">new</small> </span> </a> <ul> <!-- <li class="hide-min-toggle">UI Element</li> --> <li> <!-- class="active" --> <a href="nopadding.html" title="Index">full width</a> </li> <li> <!-- class="active" --> <a href="minimize.html" title="Compose">minimize</a> </li> <li> <!-- class="active" --> <a href="rightmenu.html" title="Compose">Right Side bar</a> </li> <li> <!-- class="active" --> <a href="topmenu.html" title="Compose">Top menu</a> </li> </ul> </li> <li> <a class="tooltip-tip" href="#" title="Bg"> <i class="fontello-photo"></i> <span>Background&nbsp; <small class="side-menu-noft bg-aqua">new</small> </span> </a> <ul> <li> <a href="imagebg.html" title="Image">Image </a> </li> <li> <a href="videobg.html" title="Video">Video </a> </li> </ul> </li> <li class="devider-horizontal"></li> <li class="devider-title"> <h3> <span>Component</span> 
</h3> </li> <li> <a class="tooltip-tip" href="#" title="Form"> <i class="fontello-doc-1"></i> <span>Form</span> </a> <ul> <li> <a href="form-element.html" title="Form Elements">Form Elements</a> </li> <li> <a href="andvance-form.html" title="Andvance Form">Andvance Form</a> </li> <li> <a href="text-editor.html" title="Text Editor">Text Editor</a> </li> <li> <a href="file-upload.html" title="File Upload">File Upload</a> </li> </ul> </li> <li> <a class="tooltip-tip" href="#" title="Tables"> <i class="fontello-calendar-1"></i> <span>Tables</span> </a> <ul> <li> <a href="table-static.html" title="Table Static">Table Static</a> </li> <li> <a href="table-dynamic.html" title="Table Dynamic">Table Dynamic</a> </li> </ul> </li> <li> <a class="tooltip-tip" href="icon.html" title="Icons"> <i class="fontello-food"></i> <span>Icons</span> <div class="noft-blue bg-green" style="display: inline-block; float: none;">New</div> </a> </li> <li> <a class="tooltip-tip" href="map.html" title="Map"> <i class="fontello-location-1"></i> <span>Map</span> </a> </li> <li class="devider-horizontal"></li> <li class="devider-title"> <h3> <span>Special Page</span> </h3> </li> <li> <a class="tooltip-tip" href="#" title="Extra"> <i class="fontello-beaker"></i> <span>Extra</span> </a> <ul> <li> <a href="invoice.html" title="Invoice">Invoice</a> </li> <li> <a href="pricing_table.html" title="Pricing Table">Pricing Table</a> </li> <li> <a href="time-line.html" title="Time Line">Time Line</a> </li> <li> <a href="404.html" title="404 Error Page">404 Error Page</a> </li> <li> <a href="500.html" title="500 Error Page">500 Error Page</a> </li> <li> <a href="lock-screen.html" title="Lock Screen">Lock Screen</a> </li> </ul> </li> <li> <a class="tooltip-tip " href="login.html" title="login"> <i class=" fontello-lock-1"></i> <span>Login</span> </a> </li> </ul> <div class="side-dash"> <h3> <span>Task Progress</span> </h3> <ul class="side-dashh-list"> <li> <!-- Task item --> <a href="#"> <h3 
class="fontello-leaf"> Tidy up your files <small class="pull-right">35%</small> </h3> <div class="progress xs bg-opacity-one height-tiny"> <div class="progress-bar bg-blue " style="width: 35%" role="progressbar" aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"> <span class="sr-only">35% Complete</span> </div> </div> </a> </li> <!-- end task item --> <li> <!-- Task item --> <a href="#"> <h3 class="fontello-box"> Check server status <small class="pull-right">40%</small> </h3> <div class="progress xs bg-opacity-one height-tiny"> <div class="progress-bar bg-aqua" style="width: 40%" role="progressbar" aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"> <span class="sr-only">40% Complete</span> </div> </div> </a> </li> <!-- end task item --> <li> <!-- Task item --> <a href="#"> <h3 class="fontello-headphones"> Support costumer <small class="pull-right">60%</small> </h3> <div class="progress xs bg-opacity-one height-tiny"> <div class="progress-bar bg-red-orange" style="width: 60%" role="progressbar" aria-valuenow="20" aria-valuemin="0" aria-valuemax="100"> <span class="sr-only">60% Complete</span> </div> </div> </a> </li> </ul> <h3> <span>Reply</span> </h3> <div id="doughnutChart" class="chart"></div> </div> </div> </div> </div> </div> <!-- #/skin-select --> <!-- END OF SIDE MENU --> <!-- Breadcrumb --> <div class="sub-board"> <span class="header-icon"><i class="fontello-home"></i> </span> <ol class="breadcrumb newcrumb ng-scope"> <li> <a href="#"> <span> </span>Form</a> </li> <li><a href="#">Advance Element</a> </li> </ol> <div class="dark" style="visibility: visible;"> <form class="navbar-form navbar-left" role="search"> <div class="form-group"> <input type="text" class="form-control search rounded id_search" placeholder="Search"> </div> </form> </div> </div> <!-- End of Breadcrumb --> <!-- CONTENT --> <div class="wrap-fluid" id="paper-bg"> <div class="row"> <div class="col-lg-12"> <div class="box"> <div class="box-header"> <!-- tools box --> <div 
class="pull-right box-tools"> <span class="box-btn" data-widget="collapse"><i class="fa fa-minus"></i> </span> <span class="box-btn" data-widget="remove"><i class="fa fa-times"></i> </span> </div> <h3 class="box-title"><i class="fontello-doc"></i> <span>Form Validation</span> </h3> </div> <!-- /.box-header --> <div class="box-body"> <form action="contact" id="contact-form" class="form-horizontal"> <fieldset> <div class="control-group"> <label class="control-label" for="name">Your Name</label> <div class="controls"> <input type="text" class="form-control" name="name" id="name"> </div> </div> <div class="control-group"> <label class="control-label" for="email">Email Address</label> <div class="controls"> <input type="text" class="form-control" name="email" id="email"> </div> </div> <div class="control-group"> <label class="control-label" for="subject">Subject</label> <div class="controls"> <input type="text" class="form-control" name="subject" id="subject"> </div> </div> <div class="control-group"> <label class="control-label" for="message">Your Message</label> <div class="controls"> <textarea class="form-control" name="message" id="message" rows="3"></textarea> </div> </div> <div class="form-actions" style="margin:20px 0 0 0;"> <button type="submit" class="btn btn-primary">Submit</button> <button type="reset" class="btn">Cancel</button> </div> </fieldset> </form> </div> <!-- /.box-body --> </div> <!-- /.box --> </div> </div> <div class="row"> <div class="col-lg-12"> <div class="box"> <div class="box-header"> <h3 class="box-title"> <span>Masked Input</span> </h3> </div> <!-- /.box-header --> <div class="box-body"> <div class="well"> <div class="input-group "> <span class="input-group-addon btn-success"><i class="fa fa-calendar"></i> </span> <input type="text" class="form-control" id="date"> <span class="input-group-addon ">99/99/9999</span> </div> </div> <div class="well"> <div class="input-group "> <span class="input-group-addon btn-success"><i class="fa 
fa-phone-square"></i> </span> <input type="text" class="form-control" id="phone"> <span class="input-group-addon ">eg.(021) 751-2789</span> </div> </div> <div class="well"> <div class="input-group "> <span class="input-group-addon btn-success"><i class="fa fa-money"></i> </span> <input type="text" class="form-control" id="money"> <span class="input-group-addon ">eg.20.000.000</span> </div> </div> <div class="well"> <div class="input-group "> <span class="input-group-addon btn-success"><i class="fa fa-phone-square"></i> </span> <input type="text" class="form-control" id="ssn"> <span class="input-group-addon ">eg.99-AAA-9999</span> </div> </div> </div> <!-- /.box-body --> </div> <!-- /.box --> </div> </div> <div class="row"> <div class="col-lg-12"> <div class="box"> <div class="box-header"> <h3 class="box-title"> <span>Date Picker</span> </h3> </div> <!-- /.box-header --> <div class="box-body"> <p>Attached to a field with the format specified via options.</p> <div class="well"> <input type="text" class="form-control" id="dp1" value="02-16-2012"> </div> <p>Start with years viewMode.</p> <div class="well"> <div data-date-viewmode="years" data-date-format="dd-mm-yyyy" data-date="12-02-2012" id="dpYears" class="input-group date"> <span class="input-group-addon add-on entypo-calendar "></span> <input type="text" value="12-02-2012" class="form-control" id="ssn2"> </div> </div> <p>Default behavior in pt-BR, picks date/time with fast masked input typing (need only to type the numbers, the static part of the mask is inserted automatically if missing) or via the popup widget, which supports year, month, day, hour and minute views:</p> <div class="well"> <div id="datetimepicker1" class="input-group date"> <input class="form-control" data-format="dd/MM/yyyy hh:mm:ss" type="text"> <span class="input-group-addon add-on"> <i style="font-style:normal;" data-time-icon="entypo-clock" data-date-icon="entypo-calendar"> </i> </span> </div> </div> </div> <!-- /.box-body --> </div> <!-- 
/.box --> </div> </div> <div class="row"> <div class="col-lg-12"> <div class="box"> <div class="box-header"> <h3 class="box-title"> <span>Time picker</span> </h3> </div> <!-- /.box-header --> <div class="box-body"> <div class="well"> <div class="input-group bootstrap-timepicker"> <input id="timepicker1" type="text" class="form-control"> <span class="input-group-addon add-on entypo-clock"></span> </div> </div> <div class="well"> <input id="t1" value="2:30 PM" data-format="hh:mm A" class="form-control" type="text"> </div> <div class="well"> <div class="input-group"> <input id="t2" value="14:30" class="form-control" readonly="" type="text"> <span style="cursor:pointer;" id="toggle-btn" class="input-group-addon add-on entypo-calendar "></span> </div> </div> </div> <!-- /.box-body --> </div> <!-- /.box --> </div> </div> <div class="row"> <div class="col-lg-12"> <div class="box"> <div class="box-header"> <h3 class="box-title"> <span>Color picker</span> </h3> </div> <!-- /.box-header --> <div class="box-body"> <div class="well"> <input type="text" class="form-control" value="#8fff00" id="cp1"> </div> <div class="well"> <input type="text" class="form-control" value="rgb(0,194,255,0.78)" id="cp2" data-color-format="rgba"> </div> <div class="well"> <div class="input-group colorpicker-component bscp" data-color="rgb(255, 146, 180)" data-color-format="rgb" id="cp3"> <input type="text" value="" readonly class="form-control" /> <span class="input-group-addon"><i style="background-color: rgb(255, 146, 180)"></i> </span> </div> </div> <div class="well"> <a href="#" class="btn btn-info" id="cp4" data-color-format="hex" data-color="rgb(255, 255, 255)">Change background color</a> </div> <hr> </div> <!-- /.box-body --> </div> <!-- /.box --> </div> </div> </div> <!-- #/paper bg --> </div> <!-- ./wrap-sidebar-content --> <!-- / END OF CONTENT --> <!-- FOOTER --> <div id="footer"> <div class="devider-footer-left"></div> <div class="time"> <p id="spanDate"></p> <p id="clock"></p> </div> 
<div class="copyright">Copyright &copy; 2014 <a href="http://ndesaintheme.com/">Themesmile</a> Made with <i class="fontello-heart-filled text-red"></i> </div> <div class="devider-footer"></div> <ul> <li><i class="fa fa-facebook-square"></i> </li> <li><i class="fa fa-twitter-square"></i> </li> <li><i class="fa fa-instagram"></i> </li> </ul> </div> <!-- / FOOTER --> </div> <!-- Container --> <!-- ================================================== --> <!-- Main jQuery Plugins --> <script type='text/javascript' src="js/jquery.js"></script> <script type='text/javascript' src='js/bootstrap.js'></script> <script type='text/javascript' src='js/date.js'></script> <script type='text/javascript' src='js/slimscroll/jquery.slimscroll.js'></script> <script type='text/javascript' src='js/jquery.nicescroll.min.js'></script> <script type='text/javascript' src='js/sliding-menu.js'></script> <script type='text/javascript' src='js/scriptbreaker-multiple-accordion-1.js'></script> <script type='text/javascript' src='js/tip/jquery.tooltipster.min.js'></script> <script type='text/javascript' src="js/donut-chart/jquery.drawDoughnutChart.js"></script> <script type='text/javascript' src="js/tab/jquery.newsTicker.js"></script> <script type='text/javascript' src="js/tab/app.ticker.js"></script> <script type='text/javascript' src='js/app.js'></script> <script type='text/javascript' src='js/vegas/jquery.vegas.js'></script> <script type='text/javascript' src='js/image-background.js'></script> <script type="text/javascript" src="js/jquery.tabSlideOut.v1.3.js"></script> <script type="text/javascript" src="js/bg-changer.js"></script> <script type="text/javascript" src="js/colorPicker/bootstrap-colorpicker.min.js"></script> <script type="text/javascript" src="js/inputMask/jquery.maskedinput.js"></script> <script type="text/javascript" src="js/validate/jquery.validate.min.js"></script> <script type="text/javascript" src="js/idealform/jquery.idealforms.js"></script> <script type="text/javascript" 
src="js/timepicker/bootstrap-timepicker.js"></script> <script type="text/javascript" src="js/datepicker/bootstrap-datepicker.js"></script> <script type="text/javascript" src="js/datepicker/clockface.js"></script> <script type="text/javascript" src="js/datepicker/bootstrap-datetimepicker.js"></script> <script type="text/javascript"> (function($) { "use strict"; $('#datetimepicker1').datetimepicker({ language: 'pt-BR' }); $('#dp1').datepicker() $('#dpYears').datepicker(); $('#timepicker1').timepicker(); $('#t1').clockface(); $('#t2').clockface({ format: 'HH:mm', trigger: 'manual' }); })(jQuery); (function($) { "use strict"; $('#toggle-btn').click(function(e) { e.stopPropagation(); $('#t2').clockface('toggle'); }); })(jQuery); $(document).ready(function() { //Validation $('#contact-form').validate({ rules: { name: { minlength: 2, required: true }, email: { required: true, email: true }, subject: { minlength: 2, required: true }, message: { minlength: 2, required: true } }, highlight: function(element) { $(element).closest('.control-group').removeClass('success').addClass('error'); }, success: function(element) { element .text('OK!').addClass('valid') .closest('.control-group').removeClass('error').addClass('success'); } }); // MASKED INPUT (function($) { "use strict"; $("#date").mask("99/99/9999", { completed: function() { alert("Your birthday was: " + this.val()); } }); $("#phone").mask("(999) 999-9999"); $("#money").mask("99.999.9999", { placeholder: "*" }); $("#ssn").mask("99--AAA--9999", { placeholder: "*" }); })(jQuery); //COLOR PICKER window.prettyPrint && prettyPrint(); // Code for those demos var _createColorpickers = function() { $('#cp1').colorpicker({ format: 'hex' }); $('#cp2').colorpicker(); $('#cp3').colorpicker(); var bodyStyle = $('body')[0].style; $('#cp4').colorpicker().on('changeColor', function(ev) { bodyStyle.backgroundColor = ev.color.toHex(); }); } _createColorpickers(); $('.bscp-destroy').click(function(e) { e.preventDefault(); 
$('.bscp').colorpicker('destroy'); }); $('.bscp-create').click(function(e) { e.preventDefault(); _createColorpickers(); }); }); </script> </body> </html>
ernandesferreira/workondemand
wp-content/themes/jobsplace/themeforest-8967421-cannavaro-notepad-memo-admin-dashboard-template/HTML - NEW/andvance-form.html
HTML
gpl-2.0
56,403
-- UPDATE `creature_template` SET `spell1`=56504, `spell2`=56513, `spell3`=56524,`unit_flags`=256 WHERE `entry`=30301; DELETE FROM `creature` WHERE `guid` IN (111098,111101); INSERT INTO `creature` (`guid`, `id`, `map`, `zoneId`, `areaId`, `spawnDifficulties`, `phaseId`, `modelid`, `equipment_id`, `position_x`, `position_y`, `position_z`, `orientation`, `spawntimesecs`, `spawndist`, `currentwaypoint`, `curhealth`, `curmana`, `MovementType`, `npcflag`, `unit_flags`, `dynamicflags`, `VerifiedBuild`) VALUES (111098, 30322, 571, 0, 0, '0', 172, 0, 1, 8501.92, -22.6061, 786.147, 3.01197, 7200, 0, 0, 0, 0, 0, 0, 0, 0, 0), (111101, 30301, 571, 0, 0, '0', 172, 0, 0, 8492.53, -35.9508, 787.042, 2.64262, 7200, 0, 0, 0, 0, 0, 0, 0, 0, 0); DELETE FROM `gameobject` WHERE `guid` IN (9004); INSERT INTO `gameobject` (`guid`, `id`, `map`, `zoneId`, `areaId`, `spawnDifficulties`, `phaseId`, `position_x`, `position_y`, `position_z`, `orientation`, `rotation0`, `rotation1`, `rotation2`, `rotation3`, `spawntimesecs`, `animprogress`, `state`, `VerifiedBuild`) VALUES (9004, 192262, 571, 0, 0, '0', 172, 8494.46, -27.2724, 787.036, 2.8863, 0, 0, -0.991864, -0.1273, 300, 255, 1, 0); DELETE FROM `creature_addon` WHERE `guid` IN (6096); INSERT INTO `creature_addon` (`guid`, `path_id`, `bytes1`, `bytes2`, `auras`) VALUES (6096,60960,0,1,''); UPDATE `creature` SET `MovementType`=2 WHERE `id`= 30300; DELETE FROM `waypoint_data` WHERE `id` IN (60960); INSERT INTO `waypoint_data` (`id`,`point`,`position_x`,`position_y`,`position_z`,`orientation`, `action_chance`, `move_type`, `wpguid`) VALUES (60960, 1,8015.62988, -126.51499, 865.740234, 3.39914, 100, 0, 0), (60960, 2,7951.34668, -145.65760, 870.674805, 3.28296, 100, 0, 0), (60960, 3,7886.42480, -168.71707, 869.549194, 3.44004, 100, 0, 0), (60960, 4,7937.22900, -154.84613, 868.431519, 0.31415, 100, 0, 0), (60960, 5,8015.62988, -126.51499, 865.740234, 3.39914, 100, 0, 0), (60960, 6,8086.52685, -105.65644, 859.413513, 0.31415, 100, 0, 0), (60960, 
7,8118.80419, -95.168877, 855.644043, 0.31415, 100, 0, 0);
Shauren/TrinityCore
sql/old/9.x/world/21111_2022_01_02/2021_12_11_01_world_2019_06_09_00_world.sql
SQL
gpl-2.0
2,061
# -*- coding: utf-8 -*- """QGIS Unit tests for core additions .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'Denis Rouzaud' __date__ = '15.5.2018' __copyright__ = 'Copyright 2015, The QGIS Project' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import qgis # NOQA import os from qgis.testing import unittest, start_app from qgis.core import metaEnumFromValue, metaEnumFromType, QgsTolerance, QgsMapLayer import sip start_app() class TestCoreAdditions(unittest.TestCase): def testMetaEnum(self): me = metaEnumFromValue(QgsTolerance.Pixels) self.assertIsNotNone(me) self.assertEqual(me.valueToKey(QgsTolerance.Pixels), 'Pixels') # if using same variable twice (e.g. me = me2), this seg faults me2 = metaEnumFromValue(QgsTolerance.Pixels, QgsTolerance) self.assertIsNotNone(me) self.assertEqual(me2.valueToKey(QgsTolerance.Pixels), 'Pixels') # do not raise error self.assertIsNone(metaEnumFromValue(1, QgsTolerance, False)) # do not provide an int with self.assertRaises(TypeError): metaEnumFromValue(1) # QgsMapLayer.LayerType is not a Q_ENUM with self.assertRaises(ValueError): metaEnumFromValue(QgsMapLayer.LayerType) if __name__ == "__main__": unittest.main()
raymondnijssen/QGIS
tests/src/python/test_core_additions.py
Python
gpl-2.0
1,581
/********************************************************************** ** Copyright (C) 2000-2008 Trolltech ASA. All rights reserved. ** ** This file is part of Qt Designer. ** ** This file may be used under the terms of the GNU General ** Public License versions 2.0 or 3.0 as published by the Free ** Software Foundation and appearing in the files LICENSE.GPL2 ** and LICENSE.GPL3 included in the packaging of this file. ** Alternatively you may (at your option) use any later version ** of the GNU General Public License if such license has been ** publicly approved by Trolltech ASA (or its successors, if any) ** and the KDE Free Qt Foundation. ** ** Please review the following information to ensure GNU General ** Public Licensing requirements will be met: ** http://trolltech.com/products/qt/licenses/licensing/opensource/. ** If you are unsure which license is appropriate for your use, please ** review the following information: ** http://trolltech.com/products/qt/licenses/licensing/licensingoverview ** or contact the sales department at sales@trolltech.com. ** ** Licensees holding valid Qt Commercial licenses may use this file in ** accordance with the Qt Commercial License Agreement provided with ** the Software. ** ** This file is provided "AS IS" with NO WARRANTY OF ANY KIND, ** INCLUDING THE WARRANTIES OF DESIGN, MERCHANTABILITY AND FITNESS FOR ** A PARTICULAR PURPOSE. Trolltech reserves all rights not granted ** herein. 
** **********************************************************************/ #ifndef SYNTAXHIGHLIGHTER_HTML_H #define SYNTAXHIGHLIGHTER_HTML_H #include <./private/qrichtext_p.h> class SyntaxHighlighter_HTML : public QTextPreProcessor { public: enum HTML { Standard = 1, Keyword, Attribute, AttribValue }; SyntaxHighlighter_HTML(); virtual ~SyntaxHighlighter_HTML(); void process( QTextDocument *doc, QTextParagraph *string, int start, bool invalidate = TRUE ); QTextFormat *format( int id ); private: void addFormat( int id, QTextFormat *f ); QTextFormat *lastFormat; int lastFormatId; QIntDict<QTextFormat> formats; }; #endif
serghei/kde3-qt
tools/designer/designer/syntaxhighlighter_html.h
C
gpl-2.0
2,127
/* * Asterisk -- An open source telephony toolkit. * * Copyright (C) 1999 - 2005, Digium, Inc. * * Mark Spencer <markster@digium.com> * * See http://www.asterisk.org for more information about * the Asterisk project. Please do not directly contact * any of the maintainers of this project for assistance; * the project provides a web site, mailing lists and IRC * channels for your use. * * This program is free software, distributed under the terms of * the GNU General Public License Version 2. See the LICENSE file * at the top of the source tree. */ #ifndef _ASTERISK_MANAGER_H #define _ASTERISK_MANAGER_H #include "asterisk/network.h" #include "asterisk/lock.h" #include "asterisk/datastore.h" #include "asterisk/xmldoc.h" /*! \file \brief The AMI - Asterisk Manager Interface - is a TCP protocol created to manage Asterisk with third-party software. Manager protocol packages are text fields of the form a: b. There is always exactly one space after the colon. \verbatim For Actions replies, the first line of the reply is a "Response:" header with values "success", "error" or "follows". "Follows" implies that the response is coming as separate events with the same ActionID. If the Action request has no ActionID, it will be hard matching events to the Action request in the manager client. The first header type is the "Event" header. Other headers vary from event to event. Headers end with standard \\r\\n termination. The last line of the manager response or event is an empty line. (\\r\\n) \endverbatim \note Please try to \b re-use \b existing \b headers to simplify manager message parsing in clients. Don't re-use an existing header with a new meaning, please. You can find a reference of standard headers in doc/manager.txt - \ref manager.c Main manager code file */ #define AMI_VERSION "2.7.0" #define DEFAULT_MANAGER_PORT 5038 /* Default port for Asterisk management via TCP */ #define DEFAULT_MANAGER_TLS_PORT 5039 /* Default port for Asterisk management via TCP */ /*! 
\name Constant return values *\note Currently, returning anything other than zero causes the session to terminate. */ /*@{ */ #define AMI_SUCCESS (0) #define AMI_DESTROY (-1) /*@} */ /*! \name Manager event classes */ /*@{ */ #define EVENT_FLAG_SYSTEM (1 << 0) /* System events such as module load/unload */ #define EVENT_FLAG_CALL (1 << 1) /* Call event, such as state change, etc */ #define EVENT_FLAG_LOG (1 << 2) /* Log events */ #define EVENT_FLAG_VERBOSE (1 << 3) /* Verbose messages */ #define EVENT_FLAG_COMMAND (1 << 4) /* Ability to read/set commands */ #define EVENT_FLAG_AGENT (1 << 5) /* Ability to read/set agent info */ #define EVENT_FLAG_USER (1 << 6) /* Ability to read/set user info */ #define EVENT_FLAG_CONFIG (1 << 7) /* Ability to modify configurations */ #define EVENT_FLAG_DTMF (1 << 8) /* Ability to read DTMF events */ #define EVENT_FLAG_REPORTING (1 << 9) /* Reporting events such as rtcp sent */ #define EVENT_FLAG_CDR (1 << 10) /* CDR events */ #define EVENT_FLAG_DIALPLAN (1 << 11) /* Dialplan events (VarSet, NewExten) */ #define EVENT_FLAG_ORIGINATE (1 << 12) /* Originate a call to an extension */ #define EVENT_FLAG_AGI (1 << 13) /* AGI events */ #define EVENT_FLAG_HOOKRESPONSE (1 << 14) /* Hook Response */ #define EVENT_FLAG_CC (1 << 15) /* Call Completion events */ #define EVENT_FLAG_AOC (1 << 16) /* Advice Of Charge events */ #define EVENT_FLAG_TEST (1 << 17) /* Test event used to signal the Asterisk Test Suite */ #define EVENT_FLAG_SECURITY (1 << 18) /* Security Message as AMI Event */ /*XXX Why shifted by 30? XXX */ #define EVENT_FLAG_MESSAGE (1 << 30) /* MESSAGE events. */ /*@} */ /*! \brief Export manager structures */ #define AST_MAX_MANHEADERS 128 /*! 
\brief Manager Helper Function * * \param category The class authorization category of the event * \param event The name of the event being raised * \param body The body of the event * * \retval 0 Success * \retval non-zero Error */ typedef int (*manager_hook_t)(int category, const char *event, char *body); struct manager_custom_hook { /*! Identifier */ char *file; /*! helper function */ manager_hook_t helper; /*! Linked list information */ AST_RWLIST_ENTRY(manager_custom_hook) list; }; /*! \brief Check if AMI is enabled */ int check_manager_enabled(void); /*! \brief Check if AMI/HTTP is enabled */ int check_webmanager_enabled(void); /*! Add a custom hook to be called when an event is fired \param hook struct manager_custom_hook object to add */ void ast_manager_register_hook(struct manager_custom_hook *hook); /*! Delete a custom hook to be called when an event is fired \param hook struct manager_custom_hook object to delete */ void ast_manager_unregister_hook(struct manager_custom_hook *hook); /*! \brief Registered hooks can call this function to invoke actions and they will receive responses through registered callback * \param hook the file identifier specified in manager_custom_hook struct when registering a hook * \param msg ami action mesage string e.g. "Action: SipPeers\r\n" * \retval 0 on Success * \retval non-zero on Failure */ int ast_hook_send_action(struct manager_custom_hook *hook, const char *msg); struct mansession; struct message { unsigned int hdrcount; const char *headers[AST_MAX_MANHEADERS]; }; struct manager_action { /*! Name of the action */ const char *action; AST_DECLARE_STRING_FIELDS( AST_STRING_FIELD(synopsis); /*!< Synopsis text (short description). */ AST_STRING_FIELD(description); /*!< Description (help text) */ AST_STRING_FIELD(syntax); /*!< Syntax text */ AST_STRING_FIELD(arguments); /*!< Description of each argument. */ AST_STRING_FIELD(seealso); /*!< See also */ ); /*! Possible list element response events. 
*/ struct ast_xml_doc_item *list_responses; /*! Final response event. */ struct ast_xml_doc_item *final_response; /*! Permission required for action. EVENT_FLAG_* */ int authority; /*! Function to be called */ int (*func)(struct mansession *s, const struct message *m); struct ast_module *module; /*!< Module this action belongs to */ /*! Where the documentation come from. */ enum ast_doc_src docsrc; /*! For easy linking */ AST_RWLIST_ENTRY(manager_action) list; /*! * \brief TRUE if the AMI action is registered and the callback can be called. * * \note Needed to prevent a race between calling the callback * function and unregestring the AMI action object. */ unsigned int registered:1; }; /*! \brief External routines may register/unregister manager callbacks this way * \note Use ast_manager_register2() to register with help text for new manager commands */ #define ast_manager_register(action, authority, func, synopsis) ast_manager_register2(action, authority, func, ast_module_info->self, synopsis, NULL) /*! \brief Register a manager callback using XML documentation to describe the manager. */ #define ast_manager_register_xml(action, authority, func) ast_manager_register2(action, authority, func, ast_module_info->self, NULL, NULL) /*! * \brief Register a manager callback using XML documentation to describe the manager. * * \note For Asterisk core modules that are not independently * loadable. * * \warning If you use ast_manager_register_xml() instead when * you need to use this function, Asterisk will crash on load. */ #define ast_manager_register_xml_core(action, authority, func) ast_manager_register2(action, authority, func, NULL, NULL, NULL) /*! * \brief Register a manager command with the manager interface * \param action Name of the requested Action: * \param authority Required authority for this command * \param func Function to call for this command * \param module The module containing func. 
(NULL if module is part of core and not loadable) * \param synopsis Help text (one line, up to 30 chars) for CLI manager show commands * \param description Help text, several lines */ int ast_manager_register2( const char *action, int authority, int (*func)(struct mansession *s, const struct message *m), struct ast_module *module, const char *synopsis, const char *description); /*! * \brief Unregister a registered manager command * \param action Name of registered Action: */ int ast_manager_unregister(const char *action); /*! * \brief Verify a session's read permissions against a permission mask. * \param ident session identity * \param perm permission mask to verify * \retval 1 if the session has the permission mask capabilities * \retval 0 otherwise */ int astman_verify_session_readpermissions(uint32_t ident, int perm); /*! * \brief Verify a session's write permissions against a permission mask. * \param ident session identity * \param perm permission mask to verify * \retval 1 if the session has the permission mask capabilities, otherwise 0 * \retval 0 otherwise */ int astman_verify_session_writepermissions(uint32_t ident, int perm); /*! \brief External routines may send asterisk manager events this way * \param category Event category, matches manager authorization \param event Event name \param contents Contents of event */ /* XXX the parser in gcc 2.95 gets confused if you don't put a space * between the last arg before VA_ARGS and the comma */ #define manager_event(category, event, contents , ...) \ __ast_manager_event_multichan(category, event, 0, NULL, __FILE__, __LINE__, __PRETTY_FUNCTION__, contents , ## __VA_ARGS__) #define ast_manager_event(chan, category, event, contents , ...) \ do { \ struct ast_channel *_chans[] = { chan, }; \ __ast_manager_event_multichan(category, event, 1, _chans, __FILE__, __LINE__, __PRETTY_FUNCTION__, contents , ## __VA_ARGS__); \ } while (0) #define ast_manager_event_multichan(category, event, nchans, chans, contents , ...) 
\ __ast_manager_event_multichan(category, event, nchans, chans, __FILE__, __LINE__, __PRETTY_FUNCTION__, contents , ## __VA_ARGS__); /*! External routines may send asterisk manager events this way * \param category Event category, matches manager authorization * \param event Event name * \param chancount Number of channels in chans parameter * \param chans A pointer to an array of channels involved in the event * \param file, line, func * \param contents Format string describing event * \param ... * \since 1.8 */ int __ast_manager_event_multichan(int category, const char *event, int chancount, struct ast_channel **chans, const char *file, int line, const char *func, const char *contents, ...) __attribute__((format(printf, 8, 9))); /*! \brief Get header from mananger transaction */ const char *astman_get_header(const struct message *m, char *var); /*! \brief Get a linked list of the Variable: headers * * \note Order of variables is reversed from the order they are specified in * the manager message */ struct ast_variable *astman_get_variables(const struct message *m); enum variable_orders { ORDER_NATURAL, ORDER_REVERSE }; /*! \brief Get a linked list of the Variable: headers with order specified */ struct ast_variable *astman_get_variables_order(const struct message *m, enum variable_orders order); /*! \brief Send error in manager transaction */ void astman_send_error(struct mansession *s, const struct message *m, char *error); /*! \brief Send error in manager transaction (with va_args support) */ void __attribute__((format(printf, 3, 4))) astman_send_error_va(struct mansession *s, const struct message *m, const char *fmt, ...); /*! \brief Send response in manager transaction */ void astman_send_response(struct mansession *s, const struct message *m, char *resp, char *msg); /*! \brief Send ack in manager transaction */ void astman_send_ack(struct mansession *s, const struct message *m, char *msg); /*! * \brief Send ack in manager transaction to begin a list. 
* * \param s - AMI session control struct. * \param m - AMI action request that started the list. * \param msg - Message contents describing the list to follow. * \param listflag - Should always be set to "start". * * \note You need to call astman_send_list_complete_start() and * astman_send_list_complete_end() to send the AMI list completion event. * * \return Nothing */ void astman_send_listack(struct mansession *s, const struct message *m, char *msg, char *listflag); /*! * \brief Start the list complete event. * \since 13.2.0 * * \param s - AMI session control struct. * \param m - AMI action request that started the list. * \param event_name - AMI list complete event name. * \param count - Number of items in the list. * * \note You need to call astman_send_list_complete_end() to end * the AMI list completion event. * * \note Between calling astman_send_list_complete_start() and * astman_send_list_complete_end() you can add additonal headers * using astman_append(). * * \return Nothing */ void astman_send_list_complete_start(struct mansession *s, const struct message *m, const char *event_name, int count); /*! * \brief End the list complete event. * \since 13.2.0 * * \param s - AMI session control struct. * * \note You need to call astman_send_list_complete_start() to start * the AMI list completion event. * * \note Between calling astman_send_list_complete_start() and * astman_send_list_complete_end() you can add additonal headers * using astman_append(). * * \return Nothing */ void astman_send_list_complete_end(struct mansession *s); void __attribute__((format(printf, 2, 3))) astman_append(struct mansession *s, const char *fmt, ...); /*! \brief Determinie if a manager session ident is authenticated */ int astman_is_authed(uint32_t ident); /*! \brief Called by Asterisk initialization */ int init_manager(void); /*! \brief Called by Asterisk module functions and the CLI command */ int reload_manager(void); /*! 
* \brief Add a datastore to a session * * \retval 0 success * \retval non-zero failure * \since 1.6.1 */ int astman_datastore_add(struct mansession *s, struct ast_datastore *datastore); /*! * \brief Remove a datastore from a session * * \retval 0 success * \retval non-zero failure * \since 1.6.1 */ int astman_datastore_remove(struct mansession *s, struct ast_datastore *datastore); /*! * \brief Find a datastore on a session * * \retval pointer to the datastore if found * \retval NULL if not found * \since 1.6.1 */ struct ast_datastore *astman_datastore_find(struct mansession *s, const struct ast_datastore_info *info, const char *uid); /*! * \brief append an event header to an ast string * \since 12 * * \param fields_string pointer to an ast_string pointer. It may be a pointer to a * NULL ast_str pointer, in which case the ast_str will be initialized. * \param header The header being applied * \param value the value of the header * * \retval 0 if successful * \retval non-zero on failure */ int ast_str_append_event_header(struct ast_str **fields_string, const char *header, const char *value); /*! \brief Struct representing a snapshot of channel state */ struct ast_channel_snapshot; /*! * \brief Generate the AMI message body from a channel snapshot * \since 12 * * \param snapshot the channel snapshot for which to generate an AMI message * body * \param prefix What to prepend to the channel fields * * \retval NULL on error * \retval ast_str* on success (must be ast_freed by caller) */ struct ast_str *ast_manager_build_channel_state_string_prefix( const struct ast_channel_snapshot *snapshot, const char *prefix); /*! * \brief Generate the AMI message body from a channel snapshot * \since 12 * * \param snapshot the channel snapshot for which to generate an AMI message * body * * \retval NULL on error * \retval ast_str* on success (must be ast_freed by caller) */ struct ast_str *ast_manager_build_channel_state_string( const struct ast_channel_snapshot *snapshot); /*! 
\brief Struct representing a snapshot of bridge state */ struct ast_bridge_snapshot; /*! * \since 12 * \brief Callback used to determine whether a key should be skipped when converting a * JSON object to a manager blob * \param key Key from JSON blob to be evaluated * \retval non-zero if the key should be excluded * \retval zero if the key should not be excluded */ typedef int (*key_exclusion_cb)(const char *key); struct ast_json; /*! * \since 12 * \brief Convert a JSON object into an AMI compatible string * * \param blob The JSON blob containing key/value pairs to convert * \param exclusion_cb A \ref key_exclusion_cb pointer to a function that will exclude * keys from the final AMI string * * \retval A malloc'd \ref ast_str object. Callers of this function should free * the returned \ref ast_str object * \retval NULL on error */ struct ast_str *ast_manager_str_from_json_object(struct ast_json *blob, key_exclusion_cb exclusion_cb); /*! * \brief Generate the AMI message body from a bridge snapshot * \since 12 * * \param snapshot the bridge snapshot for which to generate an AMI message * body * \param prefix What to prepend to the bridge fields * * \retval NULL on error * \retval ast_str* on success (must be ast_freed by caller) */ struct ast_str *ast_manager_build_bridge_state_string_prefix( const struct ast_bridge_snapshot *snapshot, const char *prefix); /*! * \brief Generate the AMI message body from a bridge snapshot * \since 12 * * \param snapshot the bridge snapshot for which to generate an AMI message * body * * \retval NULL on error * \retval ast_str* on success (must be ast_freed by caller) */ struct ast_str *ast_manager_build_bridge_state_string( const struct ast_bridge_snapshot *snapshot); /*! \brief Struct containing info for an AMI event to send out. */ struct ast_manager_event_blob { int event_flags; /*!< Flags the event should be raised with. */ const char *manager_event; /*!< The event to be raised, should be a string literal. 
*/ AST_DECLARE_STRING_FIELDS( AST_STRING_FIELD(extra_fields); /*!< Extra fields to include in the event. */ ); }; /*! * \since 12 * \brief Construct a \ref ast_manager_event_blob. * * The returned object is AO2 managed, so clean up with ao2_cleanup(). * * \param event_flags Flags the event should be raised with. * \param manager_event The event to be raised, should be a string literal. * \param extra_fields_fmt Format string for extra fields to include. * Or NO_EXTRA_FIELDS for no extra fields. * * \return New \ref ast_manager_snapshot_event object. * \return \c NULL on error. */ struct ast_manager_event_blob * __attribute__((format(printf, 3, 4))) ast_manager_event_blob_create( int event_flags, const char *manager_event, const char *extra_fields_fmt, ...); /*! GCC warns about blank or NULL format strings. So, shenanigans! */ #define NO_EXTRA_FIELDS "%s", "" /*! * \since 12 * \brief Initialize support for AMI system events. * \retval 0 on success * \retval non-zero on error */ int manager_system_init(void); /*! * \brief Initialize support for AMI channel events. * \retval 0 on success. * \retval non-zero on error. * \since 12 */ int manager_channels_init(void); /*! * \since 12 * \brief Initialize support for AMI MWI events. * \retval 0 on success * \retval non-zero on error */ int manager_mwi_init(void); /*! * \brief Initialize support for AMI channel events. * \return 0 on success. * \return non-zero on error. * \since 12 */ int manager_bridging_init(void); /*! * \brief Initialize support for AMI endpoint events. * \return 0 on success. * \return non-zero on error. * \since 12 */ int manager_endpoints_init(void); /*! * \since 12 * \brief Get the \ref stasis_message_type for generic messages * * A generic AMI message expects a JSON only payload. The payload must have the following * structure: * {type: s, class_type: i, event: [ {s: s}, ...] 
} * * - type is the AMI event type * - class_type is the class authorization type for the event * - event is a list of key/value tuples to be sent out in the message * * \retval A \ref stasis_message_type for AMI messages */ struct stasis_message_type *ast_manager_get_generic_type(void); /*! * \since 12 * \brief Get the \ref stasis topic for AMI * * \retval The \ref stasis topic for AMI * \retval NULL on error */ struct stasis_topic *ast_manager_get_topic(void); /*! * \since 12 * \brief Publish an event to AMI * * \param type The type of AMI event to publish * \param class_type The class on which to publish the event * \param obj The event data to be published. * * Publishes a message to the \ref stasis message bus solely for the consumption of AMI. * The message will be of the type provided by \ref ast_manager_get_type, and will be * published to the topic provided by \ref ast_manager_get_topic. As such, the JSON must * be constructed as defined by the \ref ast_manager_get_type message. */ void ast_manager_publish_event(const char *type, int class_type, struct ast_json *obj); /*! * \since 12 * \brief Get the \ref stasis_message_router for AMI * * \retval The \ref stasis_message_router for AMI * \retval NULL on error */ struct stasis_message_router *ast_manager_get_message_router(void); #endif /* _ASTERISK_MANAGER_H */
michalliu/OpenWrt-Firefly-Libraries
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/include/asterisk-13/include/asterisk/manager.h
C
gpl-2.0
21,550
#pragma once /* * Copyright (C) 2005-2013 Team XBMC * http://www.xbmc.org * * This Program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This Program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with XBMC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * */ #include "view/GUIViewState.h" class CGUIViewStateWindowPictures : public CGUIViewState { public: CGUIViewStateWindowPictures(const CFileItemList& items); protected: virtual void SaveViewState(); virtual CStdString GetLockType(); virtual CStdString GetExtensions(); virtual VECSOURCES& GetSources(); };
smspillaz/xbmc
xbmc/pictures/GUIViewStatePictures.h
C
gpl-2.0
1,072
/* * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. */ /* * This file generates databases with information about all supported audio * codecs. */ #ifndef WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_CODEC_DATABASE_H_ #define WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_CODEC_DATABASE_H_ #include "acm_generic_codec.h" #include "common_types.h" #include "webrtc_neteq.h" namespace webrtc { // TODO(tlegrand): replace class ACMCodecDB with a namespace. class ACMCodecDB { public: // Enum with array indexes for the supported codecs. NOTE! The order MUST // be the same as when creating the database in acm_codec_database.cc. enum { kNone = -1 #if (defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)) , kISAC # if (defined(WEBRTC_CODEC_ISAC)) , kISACSWB # endif #endif #ifdef WEBRTC_CODEC_PCM16 // Mono , kPCM16B , kPCM16Bwb , kPCM16Bswb32kHz // Stereo , kPCM16B_2ch , kPCM16Bwb_2ch , kPCM16Bswb32kHz_2ch #endif // Mono , kPCMU , kPCMA // Stereo , kPCMU_2ch , kPCMA_2ch #ifdef WEBRTC_CODEC_ILBC , kILBC #endif #ifdef WEBRTC_CODEC_AMR , kGSMAMR #endif #ifdef WEBRTC_CODEC_AMRWB , kGSMAMRWB #endif #ifdef WEBRTC_CODEC_CELT // Mono , kCELT32 // Stereo , kCELT32_2ch #endif #ifdef WEBRTC_CODEC_G722 // Mono , kG722 // Stereo , kG722_2ch #endif #ifdef WEBRTC_CODEC_G722_1 , kG722_1_32 , kG722_1_24 , kG722_1_16 #endif #ifdef WEBRTC_CODEC_G722_1C , kG722_1C_48 , kG722_1C_32 , kG722_1C_24 #endif #ifdef WEBRTC_CODEC_G729 , kG729 #endif #ifdef WEBRTC_CODEC_G729_1 , kG729_1 #endif #ifdef WEBRTC_CODEC_GSMFR , kGSMFR #endif #ifdef WEBRTC_CODEC_SPEEX , kSPEEX8 , kSPEEX16 #endif , kCNNB , kCNWB , kCNSWB #ifdef WEBRTC_CODEC_AVT , kAVT #endif #ifdef 
WEBRTC_CODEC_RED , kRED #endif , kNumCodecs }; // Set unsupported codecs to -1 #ifndef WEBRTC_CODEC_ISAC enum {kISACSWB = -1}; # ifndef WEBRTC_CODEC_ISACFX enum {kISAC = -1}; # endif #endif #ifndef WEBRTC_CODEC_PCM16 // Mono enum {kPCM16B = -1}; enum {kPCM16Bwb = -1}; enum {kPCM16Bswb32kHz = -1}; // Stereo enum {kPCM16B_2ch = -1}; enum {kPCM16Bwb_2ch = -1}; enum {kPCM16Bswb32kHz_2ch = -1}; #endif // 48 kHz not supported, always set to -1. enum {kPCM16Bswb48kHz = -1}; #ifndef WEBRTC_CODEC_ILBC enum {kILBC = -1}; #endif #ifndef WEBRTC_CODEC_AMR enum {kGSMAMR = -1}; #endif #ifndef WEBRTC_CODEC_AMRWB enum {kGSMAMRWB = -1}; #endif #ifndef WEBRTC_CODEC_CELT // Mono enum {kCELT32 = -1}; // Stereo enum {kCELT32_2ch = -1}; #endif #ifndef WEBRTC_CODEC_G722 // Mono enum {kG722 = -1}; // Stereo enum {kG722_2ch = -1}; #endif #ifndef WEBRTC_CODEC_G722_1 enum {kG722_1_32 = -1}; enum {kG722_1_24 = -1}; enum {kG722_1_16 = -1}; #endif #ifndef WEBRTC_CODEC_G722_1C enum {kG722_1C_48 = -1}; enum {kG722_1C_32 = -1}; enum {kG722_1C_24 = -1}; #endif #ifndef WEBRTC_CODEC_G729 enum {kG729 = -1}; #endif #ifndef WEBRTC_CODEC_G729_1 enum {kG729_1 = -1}; #endif #ifndef WEBRTC_CODEC_GSMFR enum {kGSMFR = -1}; #endif #ifndef WEBRTC_CODEC_SPEEX enum {kSPEEX8 = -1}; enum {kSPEEX16 = -1}; #endif #ifndef WEBRTC_CODEC_AVT enum {kAVT = -1}; #endif #ifndef WEBRTC_CODEC_RED enum {kRED = -1}; #endif // kMaxNumCodecs - Maximum number of codecs that can be activated in one // build. // kMaxNumPacketSize - Maximum number of allowed packet sizes for one codec. // These might need to be increased if adding a new codec to the database static const int kMaxNumCodecs = 50; static const int kMaxNumPacketSize = 6; // Codec specific settings // // num_packet_sizes - number of allowed packet sizes. // packet_sizes_samples - list of the allowed packet sizes. 
// basic_block_samples - assigned a value different from 0 if the codec // requires to be fed with a specific number of samples // that can be different from packet size. // channel_support - number of channels supported to encode; // 1 = mono, 2 = stereo, etc. struct CodecSettings { int num_packet_sizes; int packet_sizes_samples[kMaxNumPacketSize]; int basic_block_samples; int channel_support; }; // Gets codec information from database at the position in database given by // [codec_id]. // Input: // [codec_id] - number that specifies at what position in the database to // get the information. // Output: // [codec_inst] - filled with information about the codec. // Return: // 0 if successful, otherwise -1. static int Codec(int codec_id, CodecInst* codec_inst); // Returns codec id and mirror id from database, given the information // received in the input [codec_inst]. Mirror id is a number that tells // where to find the codec's memory (instance). The number is either the // same as codec id (most common), or a number pointing at a different // entry in the database, if the codec has several entries with different // payload types. This is used for codecs that must share one struct even if // the payload type differs. // One example is the codec iSAC which has the same struct for both 16 and // 32 khz, but they have different entries in the database. Let's say the // function is called with iSAC 32kHz. The function will return 1 as that is // the entry in the data base, and [mirror_id] = 0, as that is the entry for // iSAC 16 kHz, which holds the shared memory. // Input: // [codec_inst] - Information about the codec for which we require the // database id. // Output: // [mirror_id] - mirror id, which most often is the same as the return // value, see above. // [err_message] - if present, in the event of a mismatch found between the // input and the database, a descriptive error message is // written here. 
// [err_message] - if present, the length of error message is returned here. // Return: // codec id if successful, otherwise < 0. static int CodecNumber(const CodecInst* codec_inst, int* mirror_id, char* err_message, int max_message_len_byte); static int CodecNumber(const CodecInst* codec_inst, int* mirror_id); static int CodecId(const CodecInst* codec_inst); static int CodecId(const char* payload_name, int frequency, int channels); static int ReceiverCodecNumber(const CodecInst* codec_inst, int* mirror_id); // Returns the codec sampling frequency for codec with id = "codec_id" in // database. // TODO(tlegrand): Check if function is needed, or if we can change // to access database directly. // Input: // [codec_id] - number that specifies at what position in the database to // get the information. // Return: // codec sampling frequency if successful, otherwise -1. static int CodecFreq(int codec_id); // Return the codec's basic coding block size in samples. // TODO(tlegrand): Check if function is needed, or if we can change // to access database directly. // Input: // [codec_id] - number that specifies at what position in the database to // get the information. // Return: // codec basic block size if successful, otherwise -1. static int BasicCodingBlock(int codec_id); // Returns the NetEQ decoder database. static const WebRtcNetEQDecoder* NetEQDecoders(); // Returns mirror id, which is a number that tells where to find the codec's // memory (instance). It is either the same as codec id (most common), or a // number pointing at a different entry in the database, if the codec have // several entries with different payload types. This is used for codecs that // must share struct even if the payload type differs. // TODO(tlegrand): Check if function is needed, or if we can change // to access database directly. // Input: // [codec_id] - number that specifies codec's position in the database. // Return: // Mirror id on success, otherwise -1. 
static int MirrorID(int codec_id); // Create memory/instance for storing codec state. // Input: // [codec_inst] - information about codec. Only name of codec, "plname", is // used in this function. static ACMGenericCodec* CreateCodecInstance(const CodecInst* codec_inst); // Checks if the bitrate is valid for the codec. // Input: // [codec_id] - number that specifies codec's position in the database. // [rate] - bitrate to check. // [frame_size_samples] - (used for iLBC) specifies which frame size to go // with the rate. static bool IsRateValid(int codec_id, int rate); static bool IsISACRateValid(int rate); static bool IsILBCRateValid(int rate, int frame_size_samples); static bool IsAMRRateValid(int rate); static bool IsAMRwbRateValid(int rate); static bool IsG7291RateValid(int rate); static bool IsSpeexRateValid(int rate); static bool IsCeltRateValid(int rate); // Check if the payload type is valid, meaning that it is in the valid range // of 0 to 127. // Input: // [payload_type] - payload type. static bool ValidPayloadType(int payload_type); // Databases with information about the supported codecs // database_ - stored information about all codecs: payload type, name, // sampling frequency, packet size in samples, default channel // support, and default rate. // codec_settings_ - stored codec settings: number of allowed packet sizes, // a vector with the allowed packet sizes, basic block // samples, and max number of channels that are supported. // neteq_decoders_ - list of supported decoders in NetEQ. static const CodecInst database_[kMaxNumCodecs]; static const CodecSettings codec_settings_[kMaxNumCodecs]; static const WebRtcNetEQDecoder neteq_decoders_[kMaxNumCodecs]; }; } // namespace webrtc #endif // WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_CODEC_DATABASE_H_
techstormteam/12-vision-connect-ios-linphone
submodules/mswebrtc/webrtc/modules/audio_coding/main/source/acm_codec_database.h
C
gpl-2.0
10,472
/* * This declarations of the PIC16LF1826 MCU. * * This file is part of the GNU PIC library for SDCC, originally * created by Molnar Karoly <molnarkaroly@users.sf.net> 2016. * * This file is generated automatically by the cinc2h.pl, 2016-01-17 15:35:58 UTC. * * SDCC is licensed under the GNU Public license (GPL) v2. Note that * this license covers the code to the compiler and other executables, * but explicitly does not cover any code or objects generated by sdcc. * * For pic device libraries and header files which are derived from * Microchip header (.inc) and linker script (.lkr) files Microchip * requires that "The header files should state that they are only to be * used with authentic Microchip devices" which makes them incompatible * with the GPL. Pic device libraries and header files are located at * non-free/lib and non-free/include directories respectively. * Sdcc should be run with the --use-non-free command line option in * order to include non-free header files and libraries. * * See http://sdcc.sourceforge.net/ for the latest information on sdcc. 
*/ #ifndef __PIC16LF1826_H__ #define __PIC16LF1826_H__ //============================================================================== // // Register Addresses // //============================================================================== #ifndef NO_ADDR_DEFINES #define INDF0_ADDR 0x0000 #define INDF1_ADDR 0x0001 #define PCL_ADDR 0x0002 #define STATUS_ADDR 0x0003 #define FSR0_ADDR 0x0004 #define FSR0L_ADDR 0x0004 #define FSR0H_ADDR 0x0005 #define FSR1_ADDR 0x0006 #define FSR1L_ADDR 0x0006 #define FSR1H_ADDR 0x0007 #define BSR_ADDR 0x0008 #define WREG_ADDR 0x0009 #define PCLATH_ADDR 0x000A #define INTCON_ADDR 0x000B #define PORTA_ADDR 0x000C #define PORTB_ADDR 0x000D #define PIR1_ADDR 0x0011 #define PIR2_ADDR 0x0012 #define TMR0_ADDR 0x0015 #define TMR1_ADDR 0x0016 #define TMR1L_ADDR 0x0016 #define TMR1H_ADDR 0x0017 #define T1CON_ADDR 0x0018 #define T1GCON_ADDR 0x0019 #define TMR2_ADDR 0x001A #define PR2_ADDR 0x001B #define T2CON_ADDR 0x001C #define CPSCON0_ADDR 0x001E #define CPSCON1_ADDR 0x001F #define TRISA_ADDR 0x008C #define TRISB_ADDR 0x008D #define PIE1_ADDR 0x0091 #define PIE2_ADDR 0x0092 #define OPTION_REG_ADDR 0x0095 #define PCON_ADDR 0x0096 #define WDTCON_ADDR 0x0097 #define OSCTUNE_ADDR 0x0098 #define OSCCON_ADDR 0x0099 #define OSCSTAT_ADDR 0x009A #define ADRES_ADDR 0x009B #define ADRESL_ADDR 0x009B #define ADRESH_ADDR 0x009C #define ADCON0_ADDR 0x009D #define ADCON1_ADDR 0x009E #define LATA_ADDR 0x010C #define LATB_ADDR 0x010D #define CM1CON0_ADDR 0x0111 #define CM1CON1_ADDR 0x0112 #define CM2CON0_ADDR 0x0113 #define CM2CON1_ADDR 0x0114 #define CMOUT_ADDR 0x0115 #define BORCON_ADDR 0x0116 #define FVRCON_ADDR 0x0117 #define DACCON0_ADDR 0x0118 #define DACCON1_ADDR 0x0119 #define SRCON0_ADDR 0x011A #define SRCON1_ADDR 0x011B #define APFCON0_ADDR 0x011D #define APFCON1_ADDR 0x011E #define ANSELA_ADDR 0x018C #define ANSELB_ADDR 0x018D #define EEADR_ADDR 0x0191 #define EEADRL_ADDR 0x0191 #define EEADRH_ADDR 0x0192 #define EEDAT_ADDR 0x0193 #define 
EEDATL_ADDR 0x0193 #define EEDATH_ADDR 0x0194 #define EECON1_ADDR 0x0195 #define EECON2_ADDR 0x0196 #define RCREG_ADDR 0x0199 #define TXREG_ADDR 0x019A #define SP1BRG_ADDR 0x019B #define SP1BRGL_ADDR 0x019B #define SPBRG_ADDR 0x019B #define SPBRGL_ADDR 0x019B #define SP1BRGH_ADDR 0x019C #define SPBRGH_ADDR 0x019C #define RCSTA_ADDR 0x019D #define TXSTA_ADDR 0x019E #define BAUDCON_ADDR 0x019F #define WPUA_ADDR 0x020C #define WPUB_ADDR 0x020D #define SSP1BUF_ADDR 0x0211 #define SSPBUF_ADDR 0x0211 #define SSP1ADD_ADDR 0x0212 #define SSPADD_ADDR 0x0212 #define SSP1MSK_ADDR 0x0213 #define SSPMSK_ADDR 0x0213 #define SSP1STAT_ADDR 0x0214 #define SSPSTAT_ADDR 0x0214 #define SSP1CON1_ADDR 0x0215 #define SSPCON_ADDR 0x0215 #define SSPCON1_ADDR 0x0215 #define SSP1CON2_ADDR 0x0216 #define SSPCON2_ADDR 0x0216 #define SSP1CON3_ADDR 0x0217 #define SSPCON3_ADDR 0x0217 #define CCPR1_ADDR 0x0291 #define CCPR1L_ADDR 0x0291 #define CCPR1H_ADDR 0x0292 #define CCP1CON_ADDR 0x0293 #define PWM1CON_ADDR 0x0294 #define CCP1AS_ADDR 0x0295 #define ECCP1AS_ADDR 0x0295 #define PSTR1CON_ADDR 0x0296 #define IOCBP_ADDR 0x0394 #define IOCBN_ADDR 0x0395 #define IOCBF_ADDR 0x0396 #define CLKRCON_ADDR 0x039A #define MDCON_ADDR 0x039C #define MDSRC_ADDR 0x039D #define MDCARL_ADDR 0x039E #define MDCARH_ADDR 0x039F #define STATUS_SHAD_ADDR 0x0FE4 #define WREG_SHAD_ADDR 0x0FE5 #define BSR_SHAD_ADDR 0x0FE6 #define PCLATH_SHAD_ADDR 0x0FE7 #define FSR0L_SHAD_ADDR 0x0FE8 #define FSR0H_SHAD_ADDR 0x0FE9 #define FSR1L_SHAD_ADDR 0x0FEA #define FSR1H_SHAD_ADDR 0x0FEB #define STKPTR_ADDR 0x0FED #define TOSL_ADDR 0x0FEE #define TOSH_ADDR 0x0FEF #endif // #ifndef NO_ADDR_DEFINES //============================================================================== // // Register Definitions // //============================================================================== extern __at(0x0000) __sfr INDF0; extern __at(0x0001) __sfr INDF1; extern __at(0x0002) __sfr PCL; 
//============================================================================== // STATUS Bits extern __at(0x0003) __sfr STATUS; typedef struct { unsigned C : 1; unsigned DC : 1; unsigned Z : 1; unsigned NOT_PD : 1; unsigned NOT_TO : 1; unsigned : 1; unsigned : 1; unsigned : 1; } __STATUSbits_t; extern __at(0x0003) volatile __STATUSbits_t STATUSbits; #define _C 0x01 #define _DC 0x02 #define _Z 0x04 #define _NOT_PD 0x08 #define _NOT_TO 0x10 //============================================================================== extern __at(0x0004) __sfr FSR0; extern __at(0x0004) __sfr FSR0L; extern __at(0x0005) __sfr FSR0H; extern __at(0x0006) __sfr FSR1; extern __at(0x0006) __sfr FSR1L; extern __at(0x0007) __sfr FSR1H; //============================================================================== // BSR Bits extern __at(0x0008) __sfr BSR; typedef union { struct { unsigned BSR0 : 1; unsigned BSR1 : 1; unsigned BSR2 : 1; unsigned BSR3 : 1; unsigned BSR4 : 1; unsigned : 1; unsigned : 1; unsigned : 1; }; struct { unsigned BSR : 5; unsigned : 3; }; } __BSRbits_t; extern __at(0x0008) volatile __BSRbits_t BSRbits; #define _BSR0 0x01 #define _BSR1 0x02 #define _BSR2 0x04 #define _BSR3 0x08 #define _BSR4 0x10 //============================================================================== extern __at(0x0009) __sfr WREG; extern __at(0x000A) __sfr PCLATH; //============================================================================== // INTCON Bits extern __at(0x000B) __sfr INTCON; typedef union { struct { unsigned IOCIF : 1; unsigned INTF : 1; unsigned TMR0IF : 1; unsigned IOCIE : 1; unsigned INTE : 1; unsigned TMR0IE : 1; unsigned PEIE : 1; unsigned GIE : 1; }; struct { unsigned : 1; unsigned : 1; unsigned T0IF : 1; unsigned : 1; unsigned : 1; unsigned T0IE : 1; unsigned : 1; unsigned : 1; }; } __INTCONbits_t; extern __at(0x000B) volatile __INTCONbits_t INTCONbits; #define _IOCIF 0x01 #define _INTF 0x02 #define _TMR0IF 0x04 #define _T0IF 0x04 #define _IOCIE 0x08 #define _INTE 
0x10 #define _TMR0IE 0x20 #define _T0IE 0x20 #define _PEIE 0x40 #define _GIE 0x80 //============================================================================== //============================================================================== // PORTA Bits extern __at(0x000C) __sfr PORTA; typedef struct { unsigned RA0 : 1; unsigned RA1 : 1; unsigned RA2 : 1; unsigned RA3 : 1; unsigned RA4 : 1; unsigned RA5 : 1; unsigned RA6 : 1; unsigned RA7 : 1; } __PORTAbits_t; extern __at(0x000C) volatile __PORTAbits_t PORTAbits; #define _RA0 0x01 #define _RA1 0x02 #define _RA2 0x04 #define _RA3 0x08 #define _RA4 0x10 #define _RA5 0x20 #define _RA6 0x40 #define _RA7 0x80 //============================================================================== //============================================================================== // PORTB Bits extern __at(0x000D) __sfr PORTB; typedef struct { unsigned RB0 : 1; unsigned RB1 : 1; unsigned RB2 : 1; unsigned RB3 : 1; unsigned RB4 : 1; unsigned RB5 : 1; unsigned RB6 : 1; unsigned RB7 : 1; } __PORTBbits_t; extern __at(0x000D) volatile __PORTBbits_t PORTBbits; #define _RB0 0x01 #define _RB1 0x02 #define _RB2 0x04 #define _RB3 0x08 #define _RB4 0x10 #define _RB5 0x20 #define _RB6 0x40 #define _RB7 0x80 //============================================================================== //============================================================================== // PIR1 Bits extern __at(0x0011) __sfr PIR1; typedef struct { unsigned TMR1IF : 1; unsigned TMR2IF : 1; unsigned CCP1IF : 1; unsigned SSP1IF : 1; unsigned TXIF : 1; unsigned RCIF : 1; unsigned ADIF : 1; unsigned TMR1GIF : 1; } __PIR1bits_t; extern __at(0x0011) volatile __PIR1bits_t PIR1bits; #define _TMR1IF 0x01 #define _TMR2IF 0x02 #define _CCP1IF 0x04 #define _SSP1IF 0x08 #define _TXIF 0x10 #define _RCIF 0x20 #define _ADIF 0x40 #define _TMR1GIF 0x80 //============================================================================== 
//============================================================================== // PIR2 Bits extern __at(0x0012) __sfr PIR2; typedef struct { unsigned : 1; unsigned : 1; unsigned : 1; unsigned BCL1IF : 1; unsigned EEIF : 1; unsigned C1IF : 1; unsigned C2IF : 1; unsigned OSFIF : 1; } __PIR2bits_t; extern __at(0x0012) volatile __PIR2bits_t PIR2bits; #define _BCL1IF 0x08 #define _EEIF 0x10 #define _C1IF 0x20 #define _C2IF 0x40 #define _OSFIF 0x80 //============================================================================== extern __at(0x0015) __sfr TMR0; extern __at(0x0016) __sfr TMR1; extern __at(0x0016) __sfr TMR1L; extern __at(0x0017) __sfr TMR1H; //============================================================================== // T1CON Bits extern __at(0x0018) __sfr T1CON; typedef union { struct { unsigned TMR1ON : 1; unsigned : 1; unsigned NOT_T1SYNC : 1; unsigned T1OSCEN : 1; unsigned T1CKPS0 : 1; unsigned T1CKPS1 : 1; unsigned TMR1CS0 : 1; unsigned TMR1CS1 : 1; }; struct { unsigned : 4; unsigned T1CKPS : 2; unsigned : 2; }; struct { unsigned : 6; unsigned TMR1CS : 2; }; } __T1CONbits_t; extern __at(0x0018) volatile __T1CONbits_t T1CONbits; #define _TMR1ON 0x01 #define _NOT_T1SYNC 0x04 #define _T1OSCEN 0x08 #define _T1CKPS0 0x10 #define _T1CKPS1 0x20 #define _TMR1CS0 0x40 #define _TMR1CS1 0x80 //============================================================================== //============================================================================== // T1GCON Bits extern __at(0x0019) __sfr T1GCON; typedef union { struct { unsigned T1GSS0 : 1; unsigned T1GSS1 : 1; unsigned T1GVAL : 1; unsigned T1GGO : 1; unsigned T1GSPM : 1; unsigned T1GTM : 1; unsigned T1GPOL : 1; unsigned TMR1GE : 1; }; struct { unsigned T1GSS : 2; unsigned : 6; }; } __T1GCONbits_t; extern __at(0x0019) volatile __T1GCONbits_t T1GCONbits; #define _T1GSS0 0x01 #define _T1GSS1 0x02 #define _T1GVAL 0x04 #define _T1GGO 0x08 #define _T1GSPM 0x10 #define _T1GTM 0x20 #define _T1GPOL 0x40 #define 
_TMR1GE 0x80 //============================================================================== extern __at(0x001A) __sfr TMR2; extern __at(0x001B) __sfr PR2; //============================================================================== // T2CON Bits extern __at(0x001C) __sfr T2CON; typedef union { struct { unsigned T2CKPS0 : 1; unsigned T2CKPS1 : 1; unsigned TMR2ON : 1; unsigned T2OUTPS0 : 1; unsigned T2OUTPS1 : 1; unsigned T2OUTPS2 : 1; unsigned T2OUTPS3 : 1; unsigned : 1; }; struct { unsigned T2CKPS : 2; unsigned : 6; }; struct { unsigned : 3; unsigned T2OUTPS : 4; unsigned : 1; }; } __T2CONbits_t; extern __at(0x001C) volatile __T2CONbits_t T2CONbits; #define _T2CKPS0 0x01 #define _T2CKPS1 0x02 #define _TMR2ON 0x04 #define _T2OUTPS0 0x08 #define _T2OUTPS1 0x10 #define _T2OUTPS2 0x20 #define _T2OUTPS3 0x40 //============================================================================== //============================================================================== // CPSCON0 Bits extern __at(0x001E) __sfr CPSCON0; typedef union { struct { unsigned T0XCS : 1; unsigned CPSOUT : 1; unsigned CPSRNG0 : 1; unsigned CPSRNG1 : 1; unsigned : 1; unsigned : 1; unsigned : 1; unsigned CPSON : 1; }; struct { unsigned : 2; unsigned CPSRNG : 2; unsigned : 4; }; } __CPSCON0bits_t; extern __at(0x001E) volatile __CPSCON0bits_t CPSCON0bits; #define _T0XCS 0x01 #define _CPSOUT 0x02 #define _CPSRNG0 0x04 #define _CPSRNG1 0x08 #define _CPSON 0x80 //============================================================================== //============================================================================== // CPSCON1 Bits extern __at(0x001F) __sfr CPSCON1; typedef union { struct { unsigned CPSCH0 : 1; unsigned CPSCH1 : 1; unsigned CPSCH2 : 1; unsigned CPSCH3 : 1; unsigned : 1; unsigned : 1; unsigned : 1; unsigned : 1; }; struct { unsigned CPSCH : 4; unsigned : 4; }; } __CPSCON1bits_t; extern __at(0x001F) volatile __CPSCON1bits_t CPSCON1bits; #define _CPSCH0 0x01 #define _CPSCH1 0x02 
#define _CPSCH2 0x04 #define _CPSCH3 0x08 //============================================================================== //============================================================================== // TRISA Bits extern __at(0x008C) __sfr TRISA; typedef struct { unsigned TRISA0 : 1; unsigned TRISA1 : 1; unsigned TRISA2 : 1; unsigned TRISA3 : 1; unsigned TRISA4 : 1; unsigned TRISA5 : 1; unsigned TRISA6 : 1; unsigned TRISA7 : 1; } __TRISAbits_t; extern __at(0x008C) volatile __TRISAbits_t TRISAbits; #define _TRISA0 0x01 #define _TRISA1 0x02 #define _TRISA2 0x04 #define _TRISA3 0x08 #define _TRISA4 0x10 #define _TRISA5 0x20 #define _TRISA6 0x40 #define _TRISA7 0x80 //============================================================================== //============================================================================== // TRISB Bits extern __at(0x008D) __sfr TRISB; typedef struct { unsigned TRISB0 : 1; unsigned TRISB1 : 1; unsigned TRISB2 : 1; unsigned TRISB3 : 1; unsigned TRISB4 : 1; unsigned TRISB5 : 1; unsigned TRISB6 : 1; unsigned TRISB7 : 1; } __TRISBbits_t; extern __at(0x008D) volatile __TRISBbits_t TRISBbits; #define _TRISB0 0x01 #define _TRISB1 0x02 #define _TRISB2 0x04 #define _TRISB3 0x08 #define _TRISB4 0x10 #define _TRISB5 0x20 #define _TRISB6 0x40 #define _TRISB7 0x80 //============================================================================== //============================================================================== // PIE1 Bits extern __at(0x0091) __sfr PIE1; typedef struct { unsigned TMR1IE : 1; unsigned TMR2IE : 1; unsigned CCP1IE : 1; unsigned SSP1IE : 1; unsigned TXIE : 1; unsigned RCIE : 1; unsigned ADIE : 1; unsigned TMR1GIE : 1; } __PIE1bits_t; extern __at(0x0091) volatile __PIE1bits_t PIE1bits; #define _TMR1IE 0x01 #define _TMR2IE 0x02 #define _CCP1IE 0x04 #define _SSP1IE 0x08 #define _TXIE 0x10 #define _RCIE 0x20 #define _ADIE 0x40 #define _TMR1GIE 0x80 
//============================================================================== //============================================================================== // PIE2 Bits extern __at(0x0092) __sfr PIE2; typedef struct { unsigned : 1; unsigned : 1; unsigned : 1; unsigned BCL1IE : 1; unsigned EEIE : 1; unsigned C1IE : 1; unsigned C2IE : 1; unsigned OSFIE : 1; } __PIE2bits_t; extern __at(0x0092) volatile __PIE2bits_t PIE2bits; #define _BCL1IE 0x08 #define _EEIE 0x10 #define _C1IE 0x20 #define _C2IE 0x40 #define _OSFIE 0x80 //============================================================================== //============================================================================== // OPTION_REG Bits extern __at(0x0095) __sfr OPTION_REG; typedef union { struct { unsigned PS0 : 1; unsigned PS1 : 1; unsigned PS2 : 1; unsigned PSA : 1; unsigned TMR0SE : 1; unsigned TMR0CS : 1; unsigned INTEDG : 1; unsigned NOT_WPUEN : 1; }; struct { unsigned : 1; unsigned : 1; unsigned : 1; unsigned : 1; unsigned T0SE : 1; unsigned T0CS : 1; unsigned : 1; unsigned : 1; }; struct { unsigned PS : 3; unsigned : 5; }; } __OPTION_REGbits_t; extern __at(0x0095) volatile __OPTION_REGbits_t OPTION_REGbits; #define _PS0 0x01 #define _PS1 0x02 #define _PS2 0x04 #define _PSA 0x08 #define _TMR0SE 0x10 #define _T0SE 0x10 #define _TMR0CS 0x20 #define _T0CS 0x20 #define _INTEDG 0x40 #define _NOT_WPUEN 0x80 //============================================================================== //============================================================================== // PCON Bits extern __at(0x0096) __sfr PCON; typedef struct { unsigned NOT_BOR : 1; unsigned NOT_POR : 1; unsigned NOT_RI : 1; unsigned NOT_RMCLR : 1; unsigned : 1; unsigned : 1; unsigned STKUNF : 1; unsigned STKOVF : 1; } __PCONbits_t; extern __at(0x0096) volatile __PCONbits_t PCONbits; #define _NOT_BOR 0x01 #define _NOT_POR 0x02 #define _NOT_RI 0x04 #define _NOT_RMCLR 0x08 #define _STKUNF 0x40 #define _STKOVF 0x80 
//============================================================================== //============================================================================== // WDTCON Bits extern __at(0x0097) __sfr WDTCON; typedef union { struct { unsigned SWDTEN : 1; unsigned WDTPS0 : 1; unsigned WDTPS1 : 1; unsigned WDTPS2 : 1; unsigned WDTPS3 : 1; unsigned WDTPS4 : 1; unsigned : 1; unsigned : 1; }; struct { unsigned : 1; unsigned WDTPS : 5; unsigned : 2; }; } __WDTCONbits_t; extern __at(0x0097) volatile __WDTCONbits_t WDTCONbits; #define _SWDTEN 0x01 #define _WDTPS0 0x02 #define _WDTPS1 0x04 #define _WDTPS2 0x08 #define _WDTPS3 0x10 #define _WDTPS4 0x20 //============================================================================== //============================================================================== // OSCTUNE Bits extern __at(0x0098) __sfr OSCTUNE; typedef union { struct { unsigned TUN0 : 1; unsigned TUN1 : 1; unsigned TUN2 : 1; unsigned TUN3 : 1; unsigned TUN4 : 1; unsigned TUN5 : 1; unsigned : 1; unsigned : 1; }; struct { unsigned TUN : 6; unsigned : 2; }; } __OSCTUNEbits_t; extern __at(0x0098) volatile __OSCTUNEbits_t OSCTUNEbits; #define _TUN0 0x01 #define _TUN1 0x02 #define _TUN2 0x04 #define _TUN3 0x08 #define _TUN4 0x10 #define _TUN5 0x20 //============================================================================== //============================================================================== // OSCCON Bits extern __at(0x0099) __sfr OSCCON; typedef union { struct { unsigned SCS0 : 1; unsigned SCS1 : 1; unsigned : 1; unsigned IRCF0 : 1; unsigned IRCF1 : 1; unsigned IRCF2 : 1; unsigned IRCF3 : 1; unsigned SPLLEN : 1; }; struct { unsigned SCS : 2; unsigned : 6; }; struct { unsigned : 3; unsigned IRCF : 4; unsigned : 1; }; } __OSCCONbits_t; extern __at(0x0099) volatile __OSCCONbits_t OSCCONbits; #define _SCS0 0x01 #define _SCS1 0x02 #define _IRCF0 0x08 #define _IRCF1 0x10 #define _IRCF2 0x20 #define _IRCF3 0x40 #define _SPLLEN 0x80 
//==============================================================================
//==============================================================================
//        OSCSTAT Bits
// Bit-addressable view of the (read-only status) register at 0x009A.

extern __at(0x009A) __sfr OSCSTAT;

typedef struct
  {
  unsigned HFIOFS : 1;
  unsigned LFIOFR : 1;
  unsigned MFIOFR : 1;
  unsigned HFIOFL : 1;
  unsigned HFIOFR : 1;
  unsigned OSTS   : 1;
  unsigned PLLR   : 1;
  unsigned T1OSCR : 1;
  } __OSCSTATbits_t;

extern __at(0x009A) volatile __OSCSTATbits_t OSCSTATbits;

#define _HFIOFS 0x01
#define _LFIOFR 0x02
#define _MFIOFR 0x04
#define _HFIOFL 0x08
#define _HFIOFR 0x10
#define _OSTS   0x20
#define _PLLR   0x40
#define _T1OSCR 0x80

//==============================================================================

// A/D result register pair: ADRESL (alias ADRES) at 0x009B, ADRESH at 0x009C.
extern __at(0x009B) __sfr ADRES;
extern __at(0x009B) __sfr ADRESL;
extern __at(0x009C) __sfr ADRESH;

//==============================================================================
//        ADCON0 Bits
// A/D Control 0 at 0x009D. Bit 1 has three alias names (GO_NOT_DONE, ADGO,
// GO), each provided by its own overlay struct; CHS<4:0> is also grouped.

extern __at(0x009D) __sfr ADCON0;

typedef union
  {
  struct
    {
    unsigned ADON        : 1;
    unsigned GO_NOT_DONE : 1;
    unsigned CHS0        : 1;
    unsigned CHS1        : 1;
    unsigned CHS2        : 1;
    unsigned CHS3        : 1;
    unsigned CHS4        : 1;
    unsigned             : 1;
    };

  struct
    {
    unsigned      : 1;
    unsigned ADGO : 1;    // alias for GO_NOT_DONE
    unsigned      : 1;
    unsigned      : 1;
    unsigned      : 1;
    unsigned      : 1;
    unsigned      : 1;
    unsigned      : 1;
    };

  struct
    {
    unsigned    : 1;
    unsigned GO : 1;      // alias for GO_NOT_DONE
    unsigned    : 1;
    unsigned    : 1;
    unsigned    : 1;
    unsigned    : 1;
    unsigned    : 1;
    unsigned    : 1;
    };

  struct
    {
    unsigned     : 2;
    unsigned CHS : 5;     // CHS<4:0> accessed as a single field
    unsigned     : 1;
    };
  } __ADCON0bits_t;

extern __at(0x009D) volatile __ADCON0bits_t ADCON0bits;

#define _ADON        0x01
#define _GO_NOT_DONE 0x02
#define _ADGO        0x02
#define _GO          0x02
#define _CHS0        0x04
#define _CHS1        0x08
#define _CHS2        0x10
#define _CHS3        0x20
#define _CHS4        0x40

//==============================================================================

//==============================================================================
//        ADCON1 Bits
// A/D Control 1 at 0x009E; ADPREF<1:0> and ADCS<2:0> overlaid as fields.

extern __at(0x009E) __sfr ADCON1;

typedef union
  {
  struct
    {
    unsigned ADPREF0 : 1;
    unsigned ADPREF1 : 1;
    unsigned ADNREF  : 1;
    unsigned         : 1;
    unsigned ADCS0   : 1;
    unsigned ADCS1   : 1;
    unsigned ADCS2   : 1;
    unsigned ADFM    : 1;
    };

  struct
    {
    unsigned ADPREF : 2;  // ADPREF<1:0> accessed as a single field
    unsigned        : 6;
    };

  struct
    {
    unsigned      : 4;
    unsigned ADCS : 3;    // ADCS<2:0> accessed as a single field
    unsigned      : 1;
    };
  } __ADCON1bits_t;

extern __at(0x009E) volatile __ADCON1bits_t ADCON1bits;

#define _ADPREF0 0x01
#define _ADPREF1 0x02
#define _ADNREF  0x04
#define _ADCS0   0x10
#define _ADCS1   0x20
#define _ADCS2   0x40
#define _ADFM    0x80

//==============================================================================

//==============================================================================
//        LATA Bits
// PORTA output latch at 0x010C. Bit 5 has no latch bit here (input-only pin
// on this device — confirm against the datasheet pin diagram).

extern __at(0x010C) __sfr LATA;

typedef struct
  {
  unsigned LATA0 : 1;
  unsigned LATA1 : 1;
  unsigned LATA2 : 1;
  unsigned LATA3 : 1;
  unsigned LATA4 : 1;
  unsigned       : 1;
  unsigned LATA6 : 1;
  unsigned LATA7 : 1;
  } __LATAbits_t;

extern __at(0x010C) volatile __LATAbits_t LATAbits;

#define _LATA0 0x01
#define _LATA1 0x02
#define _LATA2 0x04
#define _LATA3 0x08
#define _LATA4 0x10
#define _LATA6 0x40
#define _LATA7 0x80

//==============================================================================

//==============================================================================
//        LATB Bits
// PORTB output latch at 0x010D; all eight latch bits implemented.

extern __at(0x010D) __sfr LATB;

typedef struct
  {
  unsigned LATB0 : 1;
  unsigned LATB1 : 1;
  unsigned LATB2 : 1;
  unsigned LATB3 : 1;
  unsigned LATB4 : 1;
  unsigned LATB5 : 1;
  unsigned LATB6 : 1;
  unsigned LATB7 : 1;
  } __LATBbits_t;

extern __at(0x010D) volatile __LATBbits_t LATBbits;

#define _LATB0 0x01
#define _LATB1 0x02
#define _LATB2 0x04
#define _LATB3 0x08
#define _LATB4 0x10
#define _LATB5 0x20
#define _LATB6 0x40
#define _LATB7 0x80

//==============================================================================

//==============================================================================
//        CM1CON0 Bits
// Comparator 1 Control 0 at 0x0111.

extern __at(0x0111) __sfr CM1CON0;

typedef struct
  {
  unsigned C1SYNC : 1;
  unsigned C1HYS  : 1;
  unsigned C1SP   : 1;
  unsigned        : 1;
  unsigned C1POL  : 1;
  unsigned C1OE   : 1;
  unsigned C1OUT  : 1;
  unsigned C1ON   : 1;
  } __CM1CON0bits_t;

extern __at(0x0111) volatile __CM1CON0bits_t CM1CON0bits;

#define _C1SYNC 0x01
#define _C1HYS  0x02
#define _C1SP   0x04
#define _C1POL  0x10
#define _C1OE   0x20
#define _C1OUT  0x40
#define _C1ON   0x80

//==============================================================================

//==============================================================================
//        CM1CON1 Bits
// Comparator 1 Control 1 at 0x0112; C1NCH<1:0> and C1PCH<1:0> input-channel
// selects overlaid as grouped fields.

extern __at(0x0112) __sfr CM1CON1;

typedef union
  {
  struct
    {
    unsigned C1NCH0 : 1;
    unsigned C1NCH1 : 1;
    unsigned        : 1;
    unsigned        : 1;
    unsigned C1PCH0 : 1;
    unsigned C1PCH1 : 1;
    unsigned C1INTN : 1;
    unsigned C1INTP : 1;
    };

  struct
    {
    unsigned C1NCH : 2;
    unsigned       : 6;
    };

  struct
    {
    unsigned       : 4;
    unsigned C1PCH : 2;
    unsigned       : 2;
    };
  } __CM1CON1bits_t;

extern __at(0x0112) volatile __CM1CON1bits_t CM1CON1bits;

#define _C1NCH0 0x01
#define _C1NCH1 0x02
#define _C1PCH0 0x10
#define _C1PCH1 0x20
#define _C1INTN 0x40
#define _C1INTP 0x80

//==============================================================================

//==============================================================================
//        CM2CON0 Bits
// Comparator 2 Control 0 at 0x0113; same layout as CM1CON0.

extern __at(0x0113) __sfr CM2CON0;

typedef struct
  {
  unsigned C2SYNC : 1;
  unsigned C2HYS  : 1;
  unsigned C2SP   : 1;
  unsigned        : 1;
  unsigned C2POL  : 1;
  unsigned C2OE   : 1;
  unsigned C2OUT  : 1;
  unsigned C2ON   : 1;
  } __CM2CON0bits_t;

extern __at(0x0113) volatile __CM2CON0bits_t CM2CON0bits;

#define _C2SYNC 0x01
#define _C2HYS  0x02
#define _C2SP   0x04
#define _C2POL  0x10
#define _C2OE   0x20
#define _C2OUT  0x40
#define _C2ON   0x80

//==============================================================================

//==============================================================================
//        CM2CON1 Bits
// Comparator 2 Control 1 at 0x0114; same layout as CM1CON1.

extern __at(0x0114) __sfr CM2CON1;

typedef union
  {
  struct
    {
    unsigned C2NCH0 : 1;
    unsigned C2NCH1 : 1;
    unsigned        : 1;
    unsigned        : 1;
    unsigned C2PCH0 : 1;
    unsigned C2PCH1 : 1;
    unsigned C2INTN : 1;
    unsigned C2INTP : 1;
    };

  struct
    {
    unsigned C2NCH : 2;
    unsigned       : 6;
    };

  struct
    {
    unsigned       : 4;
    unsigned C2PCH : 2;
    unsigned       : 2;
    };
  } __CM2CON1bits_t;

extern __at(0x0114) volatile __CM2CON1bits_t CM2CON1bits;

#define _C2NCH0 0x01
#define _C2NCH1 0x02
#define _C2PCH0 0x10
#define _C2PCH1 0x20
#define _C2INTN 0x40
#define _C2INTP 0x80

//==============================================================================

//==============================================================================
//        CMOUT Bits
// Comparator output mirror register at 0x0115 (one bit per comparator).

extern __at(0x0115) __sfr CMOUT;

typedef struct
  {
  unsigned MC1OUT : 1;
  unsigned MC2OUT : 1;
  unsigned        : 1;
  unsigned        : 1;
  unsigned        : 1;
  unsigned        : 1;
  unsigned        : 1;
  unsigned        : 1;
  } __CMOUTbits_t;

extern __at(0x0115) volatile __CMOUTbits_t CMOUTbits;

#define _MC1OUT 0x01
#define _MC2OUT 0x02

//==============================================================================

//==============================================================================
//        BORCON Bits
// Brown-out Reset Control at 0x0116: status in bit 0, software enable in bit 7.

extern __at(0x0116) __sfr BORCON;

typedef struct
  {
  unsigned BORRDY : 1;
  unsigned        : 1;
  unsigned        : 1;
  unsigned        : 1;
  unsigned        : 1;
  unsigned        : 1;
  unsigned        : 1;
  unsigned SBOREN : 1;
  } __BORCONbits_t;

extern __at(0x0116) volatile __BORCONbits_t BORCONbits;

#define _BORRDY 0x01
#define _SBOREN 0x80

//==============================================================================

//==============================================================================
//        FVRCON Bits
// Fixed Voltage Reference Control at 0x0117; ADFVR<1:0> and CDAFVR<1:0>
// gain selects overlaid as grouped fields.

extern __at(0x0117) __sfr FVRCON;

typedef union
  {
  struct
    {
    unsigned ADFVR0  : 1;
    unsigned ADFVR1  : 1;
    unsigned CDAFVR0 : 1;
    unsigned CDAFVR1 : 1;
    unsigned TSRNG   : 1;
    unsigned TSEN    : 1;
    unsigned FVRRDY  : 1;
    unsigned FVREN   : 1;
    };

  struct
    {
    unsigned ADFVR : 2;
    unsigned       : 6;
    };

  struct
    {
    unsigned        : 2;
    unsigned CDAFVR : 2;
    unsigned        : 4;
    };
  } __FVRCONbits_t;

extern __at(0x0117) volatile __FVRCONbits_t FVRCONbits;

#define _ADFVR0  0x01
#define _ADFVR1  0x02
#define _CDAFVR0 0x04
#define _CDAFVR1 0x08
#define _TSRNG   0x10
#define _TSEN    0x20
#define _FVRRDY  0x40
#define _FVREN   0x80

//==============================================================================
//============================================================================== // DACCON0 Bits extern __at(0x0118) __sfr DACCON0; typedef union { struct { unsigned DACNSS : 1; unsigned : 1; unsigned DACPSS0 : 1; unsigned DACPSS1 : 1; unsigned : 1; unsigned DACOE : 1; unsigned DACLPS : 1; unsigned DACEN : 1; }; struct { unsigned : 2; unsigned DACPSS : 2; unsigned : 4; }; } __DACCON0bits_t; extern __at(0x0118) volatile __DACCON0bits_t DACCON0bits; #define _DACNSS 0x01 #define _DACPSS0 0x04 #define _DACPSS1 0x08 #define _DACOE 0x20 #define _DACLPS 0x40 #define _DACEN 0x80 //============================================================================== //============================================================================== // DACCON1 Bits extern __at(0x0119) __sfr DACCON1; typedef union { struct { unsigned DACR0 : 1; unsigned DACR1 : 1; unsigned DACR2 : 1; unsigned DACR3 : 1; unsigned DACR4 : 1; unsigned : 1; unsigned : 1; unsigned : 1; }; struct { unsigned DACR : 5; unsigned : 3; }; } __DACCON1bits_t; extern __at(0x0119) volatile __DACCON1bits_t DACCON1bits; #define _DACR0 0x01 #define _DACR1 0x02 #define _DACR2 0x04 #define _DACR3 0x08 #define _DACR4 0x10 //============================================================================== //============================================================================== // SRCON0 Bits extern __at(0x011A) __sfr SRCON0; typedef union { struct { unsigned SRPR : 1; unsigned SRPS : 1; unsigned SRNQEN : 1; unsigned SRQEN : 1; unsigned SRCLK0 : 1; unsigned SRCLK1 : 1; unsigned SRCLK2 : 1; unsigned SRLEN : 1; }; struct { unsigned : 4; unsigned SRCLK : 3; unsigned : 1; }; } __SRCON0bits_t; extern __at(0x011A) volatile __SRCON0bits_t SRCON0bits; #define _SRPR 0x01 #define _SRPS 0x02 #define _SRNQEN 0x04 #define _SRQEN 0x08 #define _SRCLK0 0x10 #define _SRCLK1 0x20 #define _SRCLK2 0x40 #define _SRLEN 0x80 //============================================================================== 
//==============================================================================
//        SRCON1 Bits
// SR Latch Control 1 at 0x011B: set/reset input enables.

extern __at(0x011B) __sfr SRCON1;

typedef struct
  {
  unsigned SRRC1E : 1;
  unsigned SRRC2E : 1;
  unsigned SRRCKE : 1;
  unsigned SRRPE  : 1;
  unsigned SRSC1E : 1;
  unsigned SRSC2E : 1;
  unsigned SRSCKE : 1;
  unsigned SRSPE  : 1;
  } __SRCON1bits_t;

extern __at(0x011B) volatile __SRCON1bits_t SRCON1bits;

#define _SRRC1E 0x01
#define _SRRC2E 0x02
#define _SRRCKE 0x04
#define _SRRPE  0x08
#define _SRSC1E 0x10
#define _SRSC2E 0x20
#define _SRSCKE 0x40
#define _SRSPE  0x80

//==============================================================================

//==============================================================================
//        APFCON0 Bits
// Alternate Pin Function Control 0 at 0x011D (peripheral pin remapping).

extern __at(0x011D) __sfr APFCON0;

typedef struct
  {
  unsigned CCP1SEL : 1;
  unsigned P1CSEL  : 1;
  unsigned P1DSEL  : 1;
  unsigned         : 1;
  unsigned         : 1;
  unsigned SS1SEL  : 1;
  unsigned SDO1SEL : 1;
  unsigned RXDTSEL : 1;
  } __APFCON0bits_t;

extern __at(0x011D) volatile __APFCON0bits_t APFCON0bits;

#define _CCP1SEL 0x01
#define _P1CSEL  0x02
#define _P1DSEL  0x04
#define _SS1SEL  0x20
#define _SDO1SEL 0x40
#define _RXDTSEL 0x80

//==============================================================================

//==============================================================================
//        APFCON1 Bits
// Alternate Pin Function Control 1 at 0x011E; only TXCKSEL is implemented.

extern __at(0x011E) __sfr APFCON1;

typedef struct
  {
  unsigned TXCKSEL : 1;
  unsigned         : 1;
  unsigned         : 1;
  unsigned         : 1;
  unsigned         : 1;
  unsigned         : 1;
  unsigned         : 1;
  unsigned         : 1;
  } __APFCON1bits_t;

extern __at(0x011E) volatile __APFCON1bits_t APFCON1bits;

#define _TXCKSEL 0x01

//==============================================================================

//==============================================================================
//        ANSELA Bits
// PORTA analog-select register at 0x018C; ANSA<4:0> also grouped.

extern __at(0x018C) __sfr ANSELA;

typedef union
  {
  struct
    {
    unsigned ANSA0 : 1;
    unsigned ANSA1 : 1;
    unsigned ANSA2 : 1;
    unsigned ANSA3 : 1;
    unsigned ANSA4 : 1;
    unsigned       : 1;
    unsigned       : 1;
    unsigned       : 1;
    };

  struct
    {
    unsigned ANSA : 5;    // ANSA<4:0> accessed as a single field
    unsigned      : 3;
    };
  } __ANSELAbits_t;

extern __at(0x018C) volatile __ANSELAbits_t ANSELAbits;

#define _ANSA0 0x01
#define _ANSA1 0x02
#define _ANSA2 0x04
#define _ANSA3 0x08
#define _ANSA4 0x10

//==============================================================================

//==============================================================================
//        ANSELB Bits
// PORTB analog-select register at 0x018D; bit 0 not implemented.

extern __at(0x018D) __sfr ANSELB;

typedef struct
  {
  unsigned       : 1;
  unsigned ANSB1 : 1;
  unsigned ANSB2 : 1;
  unsigned ANSB3 : 1;
  unsigned ANSB4 : 1;
  unsigned ANSB5 : 1;
  unsigned ANSB6 : 1;
  unsigned ANSB7 : 1;
  } __ANSELBbits_t;

extern __at(0x018D) volatile __ANSELBbits_t ANSELBbits;

#define _ANSB1 0x02
#define _ANSB2 0x04
#define _ANSB3 0x08
#define _ANSB4 0x10
#define _ANSB5 0x20
#define _ANSB6 0x40
#define _ANSB7 0x80

//==============================================================================

// EEPROM / flash access address and data registers. EEADR/EEDAT are aliases
// for the low bytes at 0x0191/0x0193.
extern __at(0x0191) __sfr EEADR;
extern __at(0x0191) __sfr EEADRL;
extern __at(0x0192) __sfr EEADRH;
extern __at(0x0193) __sfr EEDAT;
extern __at(0x0193) __sfr EEDATL;
extern __at(0x0194) __sfr EEDATH;

//==============================================================================
//        EECON1 Bits
// EEPROM/flash Control 1 at 0x0195 (read/write triggers and protection).

extern __at(0x0195) __sfr EECON1;

typedef struct
  {
  unsigned RD    : 1;
  unsigned WR    : 1;
  unsigned WREN  : 1;
  unsigned WRERR : 1;
  unsigned FREE  : 1;
  unsigned LWLO  : 1;
  unsigned CFGS  : 1;
  unsigned EEPGD : 1;
  } __EECON1bits_t;

extern __at(0x0195) volatile __EECON1bits_t EECON1bits;

#define _RD    0x01
#define _WR    0x02
#define _WREN  0x04
#define _WRERR 0x08
#define _FREE  0x10
#define _LWLO  0x20
#define _CFGS  0x40
#define _EEPGD 0x80

//==============================================================================

// EECON2 (write-sequence register) and EUSART data / baud-rate registers.
// The SP1BRG*/SPBRG* names are aliases for the same baud-rate generator pair.
extern __at(0x0196) __sfr EECON2;
extern __at(0x0199) __sfr RCREG;
extern __at(0x019A) __sfr TXREG;
extern __at(0x019B) __sfr SP1BRG;
extern __at(0x019B) __sfr SP1BRGL;
extern __at(0x019B) __sfr SPBRG;
extern __at(0x019B) __sfr SPBRGL;
extern __at(0x019C) __sfr SP1BRGH;
extern __at(0x019C) __sfr SPBRGH;
//==============================================================================
//        RCSTA Bits
// EUSART Receive Status and Control at 0x019D.

extern __at(0x019D) __sfr RCSTA;

typedef struct
  {
  unsigned RX9D  : 1;
  unsigned OERR  : 1;
  unsigned FERR  : 1;
  unsigned ADDEN : 1;
  unsigned CREN  : 1;
  unsigned SREN  : 1;
  unsigned RX9   : 1;
  unsigned SPEN  : 1;
  } __RCSTAbits_t;

extern __at(0x019D) volatile __RCSTAbits_t RCSTAbits;

#define _RX9D  0x01
#define _OERR  0x02
#define _FERR  0x04
#define _ADDEN 0x08
#define _CREN  0x10
#define _SREN  0x20
#define _RX9   0x40
#define _SPEN  0x80

//==============================================================================

//==============================================================================
//        TXSTA Bits
// EUSART Transmit Status and Control at 0x019E.

extern __at(0x019E) __sfr TXSTA;

typedef struct
  {
  unsigned TX9D  : 1;
  unsigned TRMT  : 1;
  unsigned BRGH  : 1;
  unsigned SENDB : 1;
  unsigned SYNC  : 1;
  unsigned TXEN  : 1;
  unsigned TX9   : 1;
  unsigned CSRC  : 1;
  } __TXSTAbits_t;

extern __at(0x019E) volatile __TXSTAbits_t TXSTAbits;

#define _TX9D  0x01
#define _TRMT  0x02
#define _BRGH  0x04
#define _SENDB 0x08
#define _SYNC  0x10
#define _TXEN  0x20
#define _TX9   0x40
#define _CSRC  0x80

//==============================================================================

//==============================================================================
//        BAUDCON Bits
// EUSART Baud Rate Control at 0x019F.

extern __at(0x019F) __sfr BAUDCON;

typedef struct
  {
  unsigned ABDEN  : 1;
  unsigned WUE    : 1;
  unsigned        : 1;
  unsigned BRG16  : 1;
  unsigned SCKP   : 1;
  unsigned        : 1;
  unsigned RCIDL  : 1;
  unsigned ABDOVF : 1;
  } __BAUDCONbits_t;

extern __at(0x019F) volatile __BAUDCONbits_t BAUDCONbits;

#define _ABDEN  0x01
#define _WUE    0x02
#define _BRG16  0x08
#define _SCKP   0x10
#define _RCIDL  0x40
#define _ABDOVF 0x80

//==============================================================================

//==============================================================================
//        WPUA Bits
// PORTA weak pull-up enable at 0x020C; only the RA5 pull-up is implemented.

extern __at(0x020C) __sfr WPUA;

typedef struct
  {
  unsigned       : 1;
  unsigned       : 1;
  unsigned       : 1;
  unsigned       : 1;
  unsigned       : 1;
  unsigned WPUA5 : 1;
  unsigned       : 1;
  unsigned       : 1;
  } __WPUAbits_t;

extern __at(0x020C) volatile __WPUAbits_t WPUAbits;

#define _WPUA5 0x20

//==============================================================================

//==============================================================================
//        WPUB Bits
// PORTB weak pull-up enables at 0x020D; one bit per pin.

extern __at(0x020D) __sfr WPUB;

typedef struct
  {
  unsigned WPUB0 : 1;
  unsigned WPUB1 : 1;
  unsigned WPUB2 : 1;
  unsigned WPUB3 : 1;
  unsigned WPUB4 : 1;
  unsigned WPUB5 : 1;
  unsigned WPUB6 : 1;
  unsigned WPUB7 : 1;
  } __WPUBbits_t;

extern __at(0x020D) volatile __WPUBbits_t WPUBbits;

#define _WPUB0 0x01
#define _WPUB1 0x02
#define _WPUB2 0x04
#define _WPUB3 0x08
#define _WPUB4 0x10
#define _WPUB5 0x20
#define _WPUB6 0x40
#define _WPUB7 0x80

//==============================================================================

// MSSP buffer, address and mask registers. The un-numbered SSP* names are
// aliases for the SSP1* registers at the same addresses.
extern __at(0x0211) __sfr SSP1BUF;
extern __at(0x0211) __sfr SSPBUF;
extern __at(0x0212) __sfr SSP1ADD;
extern __at(0x0212) __sfr SSPADD;
extern __at(0x0213) __sfr SSP1MSK;
extern __at(0x0213) __sfr SSPMSK;

//==============================================================================
//        SSP1STAT Bits
// MSSP Status register at 0x0214.

extern __at(0x0214) __sfr SSP1STAT;

typedef struct
  {
  unsigned BF      : 1;
  unsigned UA      : 1;
  unsigned R_NOT_W : 1;
  unsigned S       : 1;
  unsigned P       : 1;
  unsigned D_NOT_A : 1;
  unsigned CKE     : 1;
  unsigned SMP     : 1;
  } __SSP1STATbits_t;

extern __at(0x0214) volatile __SSP1STATbits_t SSP1STATbits;

#define _BF      0x01
#define _UA      0x02
#define _R_NOT_W 0x04
#define _S       0x08
#define _P       0x10
#define _D_NOT_A 0x20
#define _CKE     0x40
#define _SMP     0x80

//==============================================================================

//==============================================================================
//        SSPSTAT Bits
// Alias view of SSP1STAT (same address, same layout); mask macros are
// prefixed _SSPSTAT_ to avoid clashing with the SSP1STAT macros above.

extern __at(0x0214) __sfr SSPSTAT;

typedef struct
  {
  unsigned BF      : 1;
  unsigned UA      : 1;
  unsigned R_NOT_W : 1;
  unsigned S       : 1;
  unsigned P       : 1;
  unsigned D_NOT_A : 1;
  unsigned CKE     : 1;
  unsigned SMP     : 1;
  } __SSPSTATbits_t;

extern __at(0x0214) volatile __SSPSTATbits_t SSPSTATbits;

#define _SSPSTAT_BF      0x01
#define _SSPSTAT_UA      0x02
#define _SSPSTAT_R_NOT_W 0x04
#define _SSPSTAT_S       0x08
#define _SSPSTAT_P       0x10
#define _SSPSTAT_D_NOT_A 0x20
#define _SSPSTAT_CKE     0x40
#define _SSPSTAT_SMP     0x80

//==============================================================================

//==============================================================================
//        SSP1CON1 Bits
// MSSP Control 1 at 0x0215; SSPM<3:0> mode select overlaid as a field.

extern __at(0x0215) __sfr SSP1CON1;

typedef union
  {
  struct
    {
    unsigned SSPM0 : 1;
    unsigned SSPM1 : 1;
    unsigned SSPM2 : 1;
    unsigned SSPM3 : 1;
    unsigned CKP   : 1;
    unsigned SSPEN : 1;
    unsigned SSPOV : 1;
    unsigned WCOL  : 1;
    };

  struct
    {
    unsigned SSPM : 4;
    unsigned      : 4;
    };
  } __SSP1CON1bits_t;

extern __at(0x0215) volatile __SSP1CON1bits_t SSP1CON1bits;

#define _SSPM0 0x01
#define _SSPM1 0x02
#define _SSPM2 0x04
#define _SSPM3 0x08
#define _CKP   0x10
#define _SSPEN 0x20
#define _SSPOV 0x40
#define _WCOL  0x80

//==============================================================================

//==============================================================================
//        SSPCON Bits
// Alias view of SSP1CON1 (same address, same layout); _SSPCON_-prefixed masks.

extern __at(0x0215) __sfr SSPCON;

typedef union
  {
  struct
    {
    unsigned SSPM0 : 1;
    unsigned SSPM1 : 1;
    unsigned SSPM2 : 1;
    unsigned SSPM3 : 1;
    unsigned CKP   : 1;
    unsigned SSPEN : 1;
    unsigned SSPOV : 1;
    unsigned WCOL  : 1;
    };

  struct
    {
    unsigned SSPM : 4;
    unsigned      : 4;
    };
  } __SSPCONbits_t;

extern __at(0x0215) volatile __SSPCONbits_t SSPCONbits;

#define _SSPCON_SSPM0 0x01
#define _SSPCON_SSPM1 0x02
#define _SSPCON_SSPM2 0x04
#define _SSPCON_SSPM3 0x08
#define _SSPCON_CKP   0x10
#define _SSPCON_SSPEN 0x20
#define _SSPCON_SSPOV 0x40
#define _SSPCON_WCOL  0x80

//==============================================================================

//==============================================================================
//        SSPCON1 Bits
// Second alias view of SSP1CON1; _SSPCON1_-prefixed masks.

extern __at(0x0215) __sfr SSPCON1;

typedef union
  {
  struct
    {
    unsigned SSPM0 : 1;
    unsigned SSPM1 : 1;
    unsigned SSPM2 : 1;
    unsigned SSPM3 : 1;
    unsigned CKP   : 1;
    unsigned SSPEN : 1;
    unsigned SSPOV : 1;
    unsigned WCOL  : 1;
    };

  struct
    {
    unsigned SSPM : 4;
    unsigned      : 4;
    };
  } __SSPCON1bits_t;

extern __at(0x0215) volatile __SSPCON1bits_t SSPCON1bits;

#define _SSPCON1_SSPM0 0x01
#define _SSPCON1_SSPM1 0x02
#define _SSPCON1_SSPM2 0x04
#define _SSPCON1_SSPM3 0x08
#define _SSPCON1_CKP   0x10
#define _SSPCON1_SSPEN 0x20
#define _SSPCON1_SSPOV 0x40
#define _SSPCON1_WCOL  0x80

//==============================================================================

//==============================================================================
//        SSP1CON2 Bits
// MSSP Control 2 at 0x0216 (I2C handshake control/status bits).

extern __at(0x0216) __sfr SSP1CON2;

typedef struct
  {
  unsigned SEN     : 1;
  unsigned RSEN    : 1;
  unsigned PEN     : 1;
  unsigned RCEN    : 1;
  unsigned ACKEN   : 1;
  unsigned ACKDT   : 1;
  unsigned ACKSTAT : 1;
  unsigned GCEN    : 1;
  } __SSP1CON2bits_t;

extern __at(0x0216) volatile __SSP1CON2bits_t SSP1CON2bits;

#define _SEN     0x01
#define _RSEN    0x02
#define _PEN     0x04
#define _RCEN    0x08
#define _ACKEN   0x10
#define _ACKDT   0x20
#define _ACKSTAT 0x40
#define _GCEN    0x80

//==============================================================================

//==============================================================================
//        SSPCON2 Bits
// Alias view of SSP1CON2; _SSPCON2_-prefixed masks.

extern __at(0x0216) __sfr SSPCON2;

typedef struct
  {
  unsigned SEN     : 1;
  unsigned RSEN    : 1;
  unsigned PEN     : 1;
  unsigned RCEN    : 1;
  unsigned ACKEN   : 1;
  unsigned ACKDT   : 1;
  unsigned ACKSTAT : 1;
  unsigned GCEN    : 1;
  } __SSPCON2bits_t;

extern __at(0x0216) volatile __SSPCON2bits_t SSPCON2bits;

#define _SSPCON2_SEN     0x01
#define _SSPCON2_RSEN    0x02
#define _SSPCON2_PEN     0x04
#define _SSPCON2_RCEN    0x08
#define _SSPCON2_ACKEN   0x10
#define _SSPCON2_ACKDT   0x20
#define _SSPCON2_ACKSTAT 0x40
#define _SSPCON2_GCEN    0x80

//==============================================================================

//==============================================================================
//        SSP1CON3 Bits
// MSSP Control 3 at 0x0217.

extern __at(0x0217) __sfr SSP1CON3;

typedef struct
  {
  unsigned DHEN   : 1;
  unsigned AHEN   : 1;
  unsigned SBCDE  : 1;
  unsigned SDAHT  : 1;
  unsigned BOEN   : 1;
  unsigned SCIE   : 1;
  unsigned PCIE   : 1;
  unsigned ACKTIM : 1;
  } __SSP1CON3bits_t;

extern __at(0x0217) volatile __SSP1CON3bits_t SSP1CON3bits;

#define _DHEN   0x01
#define _AHEN   0x02
#define _SBCDE  0x04
#define _SDAHT  0x08
#define _BOEN   0x10
#define _SCIE   0x20
#define _PCIE   0x40
#define _ACKTIM 0x80

//==============================================================================

//==============================================================================
//        SSPCON3 Bits
// Alias view of SSP1CON3; _SSPCON3_-prefixed masks.

extern __at(0x0217) __sfr SSPCON3;

typedef struct
  {
  unsigned DHEN   : 1;
  unsigned AHEN   : 1;
  unsigned SBCDE  : 1;
  unsigned SDAHT  : 1;
  unsigned BOEN   : 1;
  unsigned SCIE   : 1;
  unsigned PCIE   : 1;
  unsigned ACKTIM : 1;
  } __SSPCON3bits_t;

extern __at(0x0217) volatile __SSPCON3bits_t SSPCON3bits;

#define _SSPCON3_DHEN   0x01
#define _SSPCON3_AHEN   0x02
#define _SSPCON3_SBCDE  0x04
#define _SSPCON3_SDAHT  0x08
#define _SSPCON3_BOEN   0x10
#define _SSPCON3_SCIE   0x20
#define _SSPCON3_PCIE   0x40
#define _SSPCON3_ACKTIM 0x80

//==============================================================================

// CCP1 capture/compare register pair; CCPR1 is an alias for the low byte.
extern __at(0x0291) __sfr CCPR1;
extern __at(0x0291) __sfr CCPR1L;
extern __at(0x0292) __sfr CCPR1H;

//==============================================================================
//        CCP1CON Bits
// CCP1 Control at 0x0293; CCP1M<3:0>, DC1B<1:0> and P1M<1:0> also grouped.

extern __at(0x0293) __sfr CCP1CON;

typedef union
  {
  struct
    {
    unsigned CCP1M0 : 1;
    unsigned CCP1M1 : 1;
    unsigned CCP1M2 : 1;
    unsigned CCP1M3 : 1;
    unsigned DC1B0  : 1;
    unsigned DC1B1  : 1;
    unsigned P1M0   : 1;
    unsigned P1M1   : 1;
    };

  struct
    {
    unsigned CCP1M : 4;
    unsigned       : 4;
    };

  struct
    {
    unsigned      : 4;
    unsigned DC1B : 2;
    unsigned      : 2;
    };

  struct
    {
    unsigned     : 6;
    unsigned P1M : 2;
    };
  } __CCP1CONbits_t;

extern __at(0x0293) volatile __CCP1CONbits_t CCP1CONbits;

#define _CCP1M0 0x01
#define _CCP1M1 0x02
#define _CCP1M2 0x04
#define _CCP1M3 0x08
#define _DC1B0  0x10
#define _DC1B1  0x20
#define _P1M0   0x40
#define _P1M1   0x80

//==============================================================================
//==============================================================================
//        PWM1CON Bits
// Enhanced PWM Control at 0x0294; P1DC<6:0> dead-band delay also grouped.

extern __at(0x0294) __sfr PWM1CON;

typedef union
  {
  struct
    {
    unsigned P1DC0  : 1;
    unsigned P1DC1  : 1;
    unsigned P1DC2  : 1;
    unsigned P1DC3  : 1;
    unsigned P1DC4  : 1;
    unsigned P1DC5  : 1;
    unsigned P1DC6  : 1;
    unsigned P1RSEN : 1;
    };

  struct
    {
    unsigned P1DC : 7;    // P1DC<6:0> accessed as a single field
    unsigned      : 1;
    };
  } __PWM1CONbits_t;

extern __at(0x0294) volatile __PWM1CONbits_t PWM1CONbits;

#define _P1DC0  0x01
#define _P1DC1  0x02
#define _P1DC2  0x04
#define _P1DC3  0x08
#define _P1DC4  0x10
#define _P1DC5  0x20
#define _P1DC6  0x40
#define _P1RSEN 0x80

//==============================================================================

//==============================================================================
//        CCP1AS Bits
// Enhanced CCP Auto-Shutdown control at 0x0295; shutdown-state and source
// selects overlaid as grouped fields.

extern __at(0x0295) __sfr CCP1AS;

typedef union
  {
  struct
    {
    unsigned PSS1BD0 : 1;
    unsigned PSS1BD1 : 1;
    unsigned PSS1AC0 : 1;
    unsigned PSS1AC1 : 1;
    unsigned CCP1AS0 : 1;
    unsigned CCP1AS1 : 1;
    unsigned CCP1AS2 : 1;
    unsigned CCP1ASE : 1;
    };

  struct
    {
    unsigned PSS1BD : 2;
    unsigned        : 6;
    };

  struct
    {
    unsigned        : 2;
    unsigned PSS1AC : 2;
    unsigned        : 4;
    };

  struct
    {
    unsigned        : 4;
    unsigned CCP1AS : 3;
    unsigned        : 1;
    };
  } __CCP1ASbits_t;

extern __at(0x0295) volatile __CCP1ASbits_t CCP1ASbits;

#define _PSS1BD0 0x01
#define _PSS1BD1 0x02
#define _PSS1AC0 0x04
#define _PSS1AC1 0x08
#define _CCP1AS0 0x10
#define _CCP1AS1 0x20
#define _CCP1AS2 0x40
#define _CCP1ASE 0x80

//==============================================================================

//==============================================================================
//        ECCP1AS Bits
// Alias view of CCP1AS (same address, same layout); _ECCP1AS_-prefixed masks.

extern __at(0x0295) __sfr ECCP1AS;

typedef union
  {
  struct
    {
    unsigned PSS1BD0 : 1;
    unsigned PSS1BD1 : 1;
    unsigned PSS1AC0 : 1;
    unsigned PSS1AC1 : 1;
    unsigned CCP1AS0 : 1;
    unsigned CCP1AS1 : 1;
    unsigned CCP1AS2 : 1;
    unsigned CCP1ASE : 1;
    };

  struct
    {
    unsigned PSS1BD : 2;
    unsigned        : 6;
    };

  struct
    {
    unsigned        : 2;
    unsigned PSS1AC : 2;
    unsigned        : 4;
    };

  struct
    {
    unsigned        : 4;
    unsigned CCP1AS : 3;
    unsigned        : 1;
    };
  } __ECCP1ASbits_t;

extern __at(0x0295) volatile __ECCP1ASbits_t ECCP1ASbits;

#define _ECCP1AS_PSS1BD0 0x01
#define _ECCP1AS_PSS1BD1 0x02
#define _ECCP1AS_PSS1AC0 0x04
#define _ECCP1AS_PSS1AC1 0x08
#define _ECCP1AS_CCP1AS0 0x10
#define _ECCP1AS_CCP1AS1 0x20
#define _ECCP1AS_CCP1AS2 0x40
#define _ECCP1AS_CCP1ASE 0x80

//==============================================================================

//==============================================================================
//        PSTR1CON Bits
// PWM Steering Control at 0x0296.

extern __at(0x0296) __sfr PSTR1CON;

typedef struct
  {
  unsigned STR1A    : 1;
  unsigned STR1B    : 1;
  unsigned STR1C    : 1;
  unsigned STR1D    : 1;
  unsigned STR1SYNC : 1;
  unsigned          : 1;
  unsigned          : 1;
  unsigned          : 1;
  } __PSTR1CONbits_t;

extern __at(0x0296) volatile __PSTR1CONbits_t PSTR1CONbits;

#define _STR1A    0x01
#define _STR1B    0x02
#define _STR1C    0x04
#define _STR1D    0x08
#define _STR1SYNC 0x10

//==============================================================================

//==============================================================================
//        IOCBP Bits
// PORTB interrupt-on-change, positive-edge enables, at 0x0394.

extern __at(0x0394) __sfr IOCBP;

typedef struct
  {
  unsigned IOCBP0 : 1;
  unsigned IOCBP1 : 1;
  unsigned IOCBP2 : 1;
  unsigned IOCBP3 : 1;
  unsigned IOCBP4 : 1;
  unsigned IOCBP5 : 1;
  unsigned IOCBP6 : 1;
  unsigned IOCBP7 : 1;
  } __IOCBPbits_t;

extern __at(0x0394) volatile __IOCBPbits_t IOCBPbits;

#define _IOCBP0 0x01
#define _IOCBP1 0x02
#define _IOCBP2 0x04
#define _IOCBP3 0x08
#define _IOCBP4 0x10
#define _IOCBP5 0x20
#define _IOCBP6 0x40
#define _IOCBP7 0x80

//==============================================================================

//==============================================================================
//        IOCBN Bits
// PORTB interrupt-on-change, negative-edge enables, at 0x0395.

extern __at(0x0395) __sfr IOCBN;

typedef struct
  {
  unsigned IOCBN0 : 1;
  unsigned IOCBN1 : 1;
  unsigned IOCBN2 : 1;
  unsigned IOCBN3 : 1;
  unsigned IOCBN4 : 1;
  unsigned IOCBN5 : 1;
  unsigned IOCBN6 : 1;
  unsigned IOCBN7 : 1;
  } __IOCBNbits_t;

extern __at(0x0395) volatile __IOCBNbits_t IOCBNbits;

#define _IOCBN0 0x01
#define _IOCBN1 0x02
#define _IOCBN2 0x04
#define _IOCBN3 0x08
#define _IOCBN4 0x10
#define _IOCBN5 0x20
#define _IOCBN6 0x40
#define _IOCBN7 0x80

//==============================================================================

//==============================================================================
//        IOCBF Bits
// PORTB interrupt-on-change flag bits at 0x0396.

extern __at(0x0396) __sfr IOCBF;

typedef struct
  {
  unsigned IOCBF0 : 1;
  unsigned IOCBF1 : 1;
  unsigned IOCBF2 : 1;
  unsigned IOCBF3 : 1;
  unsigned IOCBF4 : 1;
  unsigned IOCBF5 : 1;
  unsigned IOCBF6 : 1;
  unsigned IOCBF7 : 1;
  } __IOCBFbits_t;

extern __at(0x0396) volatile __IOCBFbits_t IOCBFbits;

#define _IOCBF0 0x01
#define _IOCBF1 0x02
#define _IOCBF2 0x04
#define _IOCBF3 0x08
#define _IOCBF4 0x10
#define _IOCBF5 0x20
#define _IOCBF6 0x40
#define _IOCBF7 0x80

//==============================================================================

//==============================================================================
//        CLKRCON Bits
// Reference Clock Control at 0x039A; CLKRDIV<2:0> and CLKRDC<1:0> grouped.

extern __at(0x039A) __sfr CLKRCON;

typedef union
  {
  struct
    {
    unsigned CLKRDIV0 : 1;
    unsigned CLKRDIV1 : 1;
    unsigned CLKRDIV2 : 1;
    unsigned CLKRDC0  : 1;
    unsigned CLKRDC1  : 1;
    unsigned CLKRSLR  : 1;
    unsigned CLKROE   : 1;
    unsigned CLKREN   : 1;
    };

  struct
    {
    unsigned CLKRDIV : 3;
    unsigned         : 5;
    };

  struct
    {
    unsigned        : 3;
    unsigned CLKRDC : 2;
    unsigned        : 3;
    };
  } __CLKRCONbits_t;

extern __at(0x039A) volatile __CLKRCONbits_t CLKRCONbits;

#define _CLKRDIV0 0x01
#define _CLKRDIV1 0x02
#define _CLKRDIV2 0x04
#define _CLKRDC0  0x08
#define _CLKRDC1  0x10
#define _CLKRSLR  0x20
#define _CLKROE   0x40
#define _CLKREN   0x80

//==============================================================================

//==============================================================================
//        MDCON Bits
// Data Signal Modulator Control at 0x039C.

extern __at(0x039C) __sfr MDCON;

typedef struct
  {
  unsigned MDBIT  : 1;
  unsigned        : 1;
  unsigned        : 1;
  unsigned MDOUT  : 1;
  unsigned MDOPOL : 1;
  unsigned MDSLR  : 1;
  unsigned MDOE   : 1;
  unsigned MDEN   : 1;
  } __MDCONbits_t;

extern __at(0x039C) volatile __MDCONbits_t MDCONbits;

#define _MDBIT  0x01
#define _MDOUT  0x08
#define _MDOPOL 0x10
#define _MDSLR  0x20
#define _MDOE   0x40
#define _MDEN   0x80

//==============================================================================

//==============================================================================
//        MDSRC Bits
// Modulator Source Control at 0x039D; MDMS<3:0> source select grouped.

extern __at(0x039D) __sfr MDSRC;

typedef union
  {
  struct
    {
    unsigned MDMS0    : 1;
    unsigned MDMS1    : 1;
    unsigned MDMS2    : 1;
    unsigned MDMS3    : 1;
    unsigned          : 1;
    unsigned          : 1;
    unsigned          : 1;
    unsigned MDMSODIS : 1;
    };

  struct
    {
    unsigned MDMS : 4;
    unsigned      : 4;
    };
  } __MDSRCbits_t;

extern __at(0x039D) volatile __MDSRCbits_t MDSRCbits;

#define _MDMS0    0x01
#define _MDMS1    0x02
#define _MDMS2    0x04
#define _MDMS3    0x08
#define _MDMSODIS 0x80

//==============================================================================

//==============================================================================
//        MDCARL Bits
// Modulator Low Carrier Control at 0x039E; MDCL<3:0> carrier select grouped.

extern __at(0x039E) __sfr MDCARL;

typedef union
  {
  struct
    {
    unsigned MDCL0    : 1;
    unsigned MDCL1    : 1;
    unsigned MDCL2    : 1;
    unsigned MDCL3    : 1;
    unsigned          : 1;
    unsigned MDCLSYNC : 1;
    unsigned MDCLPOL  : 1;
    unsigned MDCLODIS : 1;
    };

  struct
    {
    unsigned MDCL : 4;
    unsigned      : 4;
    };
  } __MDCARLbits_t;

extern __at(0x039E) volatile __MDCARLbits_t MDCARLbits;

#define _MDCL0    0x01
#define _MDCL1    0x02
#define _MDCL2    0x04
#define _MDCL3    0x08
#define _MDCLSYNC 0x20
#define _MDCLPOL  0x40
#define _MDCLODIS 0x80

//==============================================================================

//==============================================================================
//        MDCARH Bits
// Modulator High Carrier Control at 0x039F; same layout as MDCARL.

extern __at(0x039F) __sfr MDCARH;

typedef union
  {
  struct
    {
    unsigned MDCH0    : 1;
    unsigned MDCH1    : 1;
    unsigned MDCH2    : 1;
    unsigned MDCH3    : 1;
    unsigned          : 1;
    unsigned MDCHSYNC : 1;
    unsigned MDCHPOL  : 1;
    unsigned MDCHODIS : 1;
    };

  struct
    {
    unsigned MDCH : 4;
    unsigned      : 4;
    };
  } __MDCARHbits_t;

extern __at(0x039F) volatile __MDCARHbits_t MDCARHbits;

#define _MDCH0    0x01
#define _MDCH1    0x02
#define _MDCH2    0x04
#define _MDCH3    0x08
#define _MDCHSYNC 0x20
#define _MDCHPOL  0x40
#define _MDCHODIS 0x80

//==============================================================================

//==============================================================================
//        STATUS_SHAD Bits
// Interrupt shadow copy of the STATUS flags, at 0x0FE4.

extern __at(0x0FE4) __sfr STATUS_SHAD;

typedef struct
  {
  unsigned C_SHAD  : 1;
  unsigned DC_SHAD : 1;
  unsigned Z_SHAD  : 1;
  unsigned         : 1;
  unsigned         : 1;
  unsigned         : 1;
  unsigned         : 1;
  unsigned         : 1;
  } __STATUS_SHADbits_t;

extern __at(0x0FE4) volatile __STATUS_SHADbits_t STATUS_SHADbits;

#define _C_SHAD  0x01
#define _DC_SHAD 0x02
#define _Z_SHAD  0x04

//==============================================================================

// Remaining interrupt shadow registers, stack pointer and top-of-stack pair.
extern __at(0x0FE5) __sfr WREG_SHAD;
extern __at(0x0FE6) __sfr BSR_SHAD;
extern __at(0x0FE7) __sfr PCLATH_SHAD;
extern __at(0x0FE8) __sfr FSR0L_SHAD;
extern __at(0x0FE9) __sfr FSR0H_SHAD;
extern __at(0x0FEA) __sfr FSR1L_SHAD;
extern __at(0x0FEB) __sfr FSR1H_SHAD;
extern __at(0x0FED) __sfr STKPTR;
extern __at(0x0FEE) __sfr TOSL;
extern __at(0x0FEF) __sfr TOSH;

//==============================================================================
//
//        Configuration Bits
//
//==============================================================================

// Configuration-word program-memory addresses. The option macros below are
// active-low field values intended to be ANDed together in a __code directive.
#define _CONFIG1 0x8007
#define _CONFIG2 0x8008

//----------------------------- CONFIG1 Options -------------------------------

#define _FOSC_LP       0x3FF8 // LP Oscillator, Low-power crystal connected between OSC1 and OSC2 pins.
#define _FOSC_XT       0x3FF9 // XT Oscillator, Crystal/resonator connected between OSC1 and OSC2 pins.
#define _FOSC_HS       0x3FFA // HS Oscillator, High-speed crystal/resonator connected between OSC1 and OSC2 pins.
#define _FOSC_EXTRC    0x3FFB // EXTRC oscillator: External RC circuit connected to CLKIN pin.
#define _FOSC_INTOSC   0x3FFC // INTOSC oscillator: I/O function on CLKIN pin.
#define _FOSC_ECL      0x3FFD // ECL, External Clock, Low Power Mode (0-0.5 MHz): device clock supplied to CLKIN pin.
#define _FOSC_ECM      0x3FFE // ECM, External Clock, Medium Power Mode (0.5-4 MHz): device clock supplied to CLKIN pin.
#define _FOSC_ECH      0x3FFF // ECH, External Clock, High Power Mode (4-32 MHz): device clock supplied to CLKIN pin.
#define _WDTE_OFF      0x3FE7 // WDT disabled.
#define _WDTE_SWDTEN   0x3FEF // WDT controlled by the SWDTEN bit in the WDTCON register.
#define _WDTE_NSLEEP   0x3FF7 // WDT enabled while running and disabled in Sleep.
#define _WDTE_ON       0x3FFF // WDT enabled.
#define _PWRTE_ON      0x3FDF // PWRT enabled.
#define _PWRTE_OFF     0x3FFF // PWRT disabled.
#define _MCLRE_OFF     0x3FBF // MCLR/VPP pin function is digital input.
#define _MCLRE_ON      0x3FFF // MCLR/VPP pin function is MCLR.
#define _CP_ON         0x3F7F // Program memory code protection is enabled.
#define _CP_OFF        0x3FFF // Program memory code protection is disabled.
#define _CPD_ON        0x3EFF // Data memory code protection is enabled.
#define _CPD_OFF       0x3FFF // Data memory code protection is disabled.
#define _BOREN_OFF     0x39FF // Brown-out Reset disabled.
#define _BOREN_SBODEN  0x3BFF // Brown-out Reset controlled by the SBOREN bit in the BORCON register.
#define _BOREN_NSLEEP  0x3DFF // Brown-out Reset enabled while running and disabled in Sleep.
#define _BOREN_ON      0x3FFF // Brown-out Reset enabled.
#define _CLKOUTEN_ON   0x37FF // CLKOUT function is enabled on the CLKOUT pin.
#define _CLKOUTEN_OFF  0x3FFF // CLKOUT function is disabled. I/O or oscillator function on the CLKOUT pin.
#define _IESO_OFF      0x2FFF // Internal/External Switchover mode is disabled.
#define _IESO_ON       0x3FFF // Internal/External Switchover mode is enabled.
#define _FCMEN_OFF     0x1FFF // Fail-Safe Clock Monitor is disabled.
#define _FCMEN_ON      0x3FFF // Fail-Safe Clock Monitor is enabled.

//----------------------------- CONFIG2 Options -------------------------------

#define _WRT_ALL       0x3FFC // 000h to 7FFh write protected, no addresses may be modified by EECON control.
#define _WRT_HALF      0x3FFD // 000h to 3FFh write protected, 400h to 7FFh may be modified by EECON control.
#define _WRT_BOOT      0x3FFE // 000h to 1FFh write protected, 200h to 7FFh may be modified by EECON control.
#define _WRT_OFF       0x3FFF // Write protection off.
#define _PLLEN_OFF     0x3EFF // 4x PLL disabled.
#define _PLLEN_ON      0x3FFF // 4x PLL enabled.
#define _STVREN_OFF    0x3DFF // Stack Overflow or Underflow will not cause a Reset.
#define _STVREN_ON     0x3FFF // Stack Overflow or Underflow will cause a Reset.
#define _BORV_HI       0x3BFF // Brown-out Reset Voltage (Vbor), high trip point selected.
#define _BORV_LO       0x3FFF // Brown-out Reset Voltage (Vbor), low trip point selected.
#define _BORV_19       0x3FFF // Brown-out Reset Voltage (Vbor), low trip point selected.
#define _DEBUG_ON      0x2FFF // In-Circuit Debugger enabled, ICSPCLK and ICSPDAT are dedicated to the debugger.
#define _DEBUG_OFF     0x3FFF // In-Circuit Debugger disabled, ICSPCLK and ICSPDAT are general purpose I/O pins.
#define _LVP_OFF       0x1FFF // High-voltage on MCLR/VPP must be used for programming.
#define _LVP_ON        0x3FFF // Low-voltage programming enabled.
//============================================================================== #define _DEVID1 0x8006 #define _IDLOC0 0x8000 #define _IDLOC1 0x8001 #define _IDLOC2 0x8002 #define _IDLOC3 0x8003 //============================================================================== #ifndef NO_BIT_DEFINES #define ADON ADCON0bits.ADON // bit 0 #define GO_NOT_DONE ADCON0bits.GO_NOT_DONE // bit 1, shadows bit in ADCON0bits #define ADGO ADCON0bits.ADGO // bit 1, shadows bit in ADCON0bits #define GO ADCON0bits.GO // bit 1, shadows bit in ADCON0bits #define CHS0 ADCON0bits.CHS0 // bit 2 #define CHS1 ADCON0bits.CHS1 // bit 3 #define CHS2 ADCON0bits.CHS2 // bit 4 #define CHS3 ADCON0bits.CHS3 // bit 5 #define CHS4 ADCON0bits.CHS4 // bit 6 #define ADPREF0 ADCON1bits.ADPREF0 // bit 0 #define ADPREF1 ADCON1bits.ADPREF1 // bit 1 #define ADNREF ADCON1bits.ADNREF // bit 2 #define ADCS0 ADCON1bits.ADCS0 // bit 4 #define ADCS1 ADCON1bits.ADCS1 // bit 5 #define ADCS2 ADCON1bits.ADCS2 // bit 6 #define ADFM ADCON1bits.ADFM // bit 7 #define ANSA0 ANSELAbits.ANSA0 // bit 0 #define ANSA1 ANSELAbits.ANSA1 // bit 1 #define ANSA2 ANSELAbits.ANSA2 // bit 2 #define ANSA3 ANSELAbits.ANSA3 // bit 3 #define ANSA4 ANSELAbits.ANSA4 // bit 4 #define ANSB1 ANSELBbits.ANSB1 // bit 1 #define ANSB2 ANSELBbits.ANSB2 // bit 2 #define ANSB3 ANSELBbits.ANSB3 // bit 3 #define ANSB4 ANSELBbits.ANSB4 // bit 4 #define ANSB5 ANSELBbits.ANSB5 // bit 5 #define ANSB6 ANSELBbits.ANSB6 // bit 6 #define ANSB7 ANSELBbits.ANSB7 // bit 7 #define CCP1SEL APFCON0bits.CCP1SEL // bit 0 #define P1CSEL APFCON0bits.P1CSEL // bit 1 #define P1DSEL APFCON0bits.P1DSEL // bit 2 #define SS1SEL APFCON0bits.SS1SEL // bit 5 #define SDO1SEL APFCON0bits.SDO1SEL // bit 6 #define RXDTSEL APFCON0bits.RXDTSEL // bit 7 #define TXCKSEL APFCON1bits.TXCKSEL // bit 0 #define ABDEN BAUDCONbits.ABDEN // bit 0 #define WUE BAUDCONbits.WUE // bit 1 #define BRG16 BAUDCONbits.BRG16 // bit 3 #define SCKP BAUDCONbits.SCKP // bit 4 #define RCIDL BAUDCONbits.RCIDL 
// bit 6 #define ABDOVF BAUDCONbits.ABDOVF // bit 7 #define BORRDY BORCONbits.BORRDY // bit 0 #define SBOREN BORCONbits.SBOREN // bit 7 #define BSR0 BSRbits.BSR0 // bit 0 #define BSR1 BSRbits.BSR1 // bit 1 #define BSR2 BSRbits.BSR2 // bit 2 #define BSR3 BSRbits.BSR3 // bit 3 #define BSR4 BSRbits.BSR4 // bit 4 #define PSS1BD0 CCP1ASbits.PSS1BD0 // bit 0 #define PSS1BD1 CCP1ASbits.PSS1BD1 // bit 1 #define PSS1AC0 CCP1ASbits.PSS1AC0 // bit 2 #define PSS1AC1 CCP1ASbits.PSS1AC1 // bit 3 #define CCP1AS0 CCP1ASbits.CCP1AS0 // bit 4 #define CCP1AS1 CCP1ASbits.CCP1AS1 // bit 5 #define CCP1AS2 CCP1ASbits.CCP1AS2 // bit 6 #define CCP1ASE CCP1ASbits.CCP1ASE // bit 7 #define CCP1M0 CCP1CONbits.CCP1M0 // bit 0 #define CCP1M1 CCP1CONbits.CCP1M1 // bit 1 #define CCP1M2 CCP1CONbits.CCP1M2 // bit 2 #define CCP1M3 CCP1CONbits.CCP1M3 // bit 3 #define DC1B0 CCP1CONbits.DC1B0 // bit 4 #define DC1B1 CCP1CONbits.DC1B1 // bit 5 #define P1M0 CCP1CONbits.P1M0 // bit 6 #define P1M1 CCP1CONbits.P1M1 // bit 7 #define CLKRDIV0 CLKRCONbits.CLKRDIV0 // bit 0 #define CLKRDIV1 CLKRCONbits.CLKRDIV1 // bit 1 #define CLKRDIV2 CLKRCONbits.CLKRDIV2 // bit 2 #define CLKRDC0 CLKRCONbits.CLKRDC0 // bit 3 #define CLKRDC1 CLKRCONbits.CLKRDC1 // bit 4 #define CLKRSLR CLKRCONbits.CLKRSLR // bit 5 #define CLKROE CLKRCONbits.CLKROE // bit 6 #define CLKREN CLKRCONbits.CLKREN // bit 7 #define C1SYNC CM1CON0bits.C1SYNC // bit 0 #define C1HYS CM1CON0bits.C1HYS // bit 1 #define C1SP CM1CON0bits.C1SP // bit 2 #define C1POL CM1CON0bits.C1POL // bit 4 #define C1OE CM1CON0bits.C1OE // bit 5 #define C1OUT CM1CON0bits.C1OUT // bit 6 #define C1ON CM1CON0bits.C1ON // bit 7 #define C1NCH0 CM1CON1bits.C1NCH0 // bit 0 #define C1NCH1 CM1CON1bits.C1NCH1 // bit 1 #define C1PCH0 CM1CON1bits.C1PCH0 // bit 4 #define C1PCH1 CM1CON1bits.C1PCH1 // bit 5 #define C1INTN CM1CON1bits.C1INTN // bit 6 #define C1INTP CM1CON1bits.C1INTP // bit 7 #define C2SYNC CM2CON0bits.C2SYNC // bit 0 #define C2HYS CM2CON0bits.C2HYS // bit 1 #define C2SP 
CM2CON0bits.C2SP // bit 2 #define C2POL CM2CON0bits.C2POL // bit 4 #define C2OE CM2CON0bits.C2OE // bit 5 #define C2OUT CM2CON0bits.C2OUT // bit 6 #define C2ON CM2CON0bits.C2ON // bit 7 #define C2NCH0 CM2CON1bits.C2NCH0 // bit 0 #define C2NCH1 CM2CON1bits.C2NCH1 // bit 1 #define C2PCH0 CM2CON1bits.C2PCH0 // bit 4 #define C2PCH1 CM2CON1bits.C2PCH1 // bit 5 #define C2INTN CM2CON1bits.C2INTN // bit 6 #define C2INTP CM2CON1bits.C2INTP // bit 7 #define MC1OUT CMOUTbits.MC1OUT // bit 0 #define MC2OUT CMOUTbits.MC2OUT // bit 1 #define T0XCS CPSCON0bits.T0XCS // bit 0 #define CPSOUT CPSCON0bits.CPSOUT // bit 1 #define CPSRNG0 CPSCON0bits.CPSRNG0 // bit 2 #define CPSRNG1 CPSCON0bits.CPSRNG1 // bit 3 #define CPSON CPSCON0bits.CPSON // bit 7 #define CPSCH0 CPSCON1bits.CPSCH0 // bit 0 #define CPSCH1 CPSCON1bits.CPSCH1 // bit 1 #define CPSCH2 CPSCON1bits.CPSCH2 // bit 2 #define CPSCH3 CPSCON1bits.CPSCH3 // bit 3 #define DACNSS DACCON0bits.DACNSS // bit 0 #define DACPSS0 DACCON0bits.DACPSS0 // bit 2 #define DACPSS1 DACCON0bits.DACPSS1 // bit 3 #define DACOE DACCON0bits.DACOE // bit 5 #define DACLPS DACCON0bits.DACLPS // bit 6 #define DACEN DACCON0bits.DACEN // bit 7 #define DACR0 DACCON1bits.DACR0 // bit 0 #define DACR1 DACCON1bits.DACR1 // bit 1 #define DACR2 DACCON1bits.DACR2 // bit 2 #define DACR3 DACCON1bits.DACR3 // bit 3 #define DACR4 DACCON1bits.DACR4 // bit 4 #define RD EECON1bits.RD // bit 0 #define WR EECON1bits.WR // bit 1 #define WREN EECON1bits.WREN // bit 2 #define WRERR EECON1bits.WRERR // bit 3 #define FREE EECON1bits.FREE // bit 4 #define LWLO EECON1bits.LWLO // bit 5 #define CFGS EECON1bits.CFGS // bit 6 #define EEPGD EECON1bits.EEPGD // bit 7 #define ADFVR0 FVRCONbits.ADFVR0 // bit 0 #define ADFVR1 FVRCONbits.ADFVR1 // bit 1 #define CDAFVR0 FVRCONbits.CDAFVR0 // bit 2 #define CDAFVR1 FVRCONbits.CDAFVR1 // bit 3 #define TSRNG FVRCONbits.TSRNG // bit 4 #define TSEN FVRCONbits.TSEN // bit 5 #define FVRRDY FVRCONbits.FVRRDY // bit 6 #define FVREN FVRCONbits.FVREN 
// bit 7 #define IOCIF INTCONbits.IOCIF // bit 0 #define INTF INTCONbits.INTF // bit 1 #define TMR0IF INTCONbits.TMR0IF // bit 2, shadows bit in INTCONbits #define T0IF INTCONbits.T0IF // bit 2, shadows bit in INTCONbits #define IOCIE INTCONbits.IOCIE // bit 3 #define INTE INTCONbits.INTE // bit 4 #define TMR0IE INTCONbits.TMR0IE // bit 5, shadows bit in INTCONbits #define T0IE INTCONbits.T0IE // bit 5, shadows bit in INTCONbits #define PEIE INTCONbits.PEIE // bit 6 #define GIE INTCONbits.GIE // bit 7 #define IOCBF0 IOCBFbits.IOCBF0 // bit 0 #define IOCBF1 IOCBFbits.IOCBF1 // bit 1 #define IOCBF2 IOCBFbits.IOCBF2 // bit 2 #define IOCBF3 IOCBFbits.IOCBF3 // bit 3 #define IOCBF4 IOCBFbits.IOCBF4 // bit 4 #define IOCBF5 IOCBFbits.IOCBF5 // bit 5 #define IOCBF6 IOCBFbits.IOCBF6 // bit 6 #define IOCBF7 IOCBFbits.IOCBF7 // bit 7 #define IOCBN0 IOCBNbits.IOCBN0 // bit 0 #define IOCBN1 IOCBNbits.IOCBN1 // bit 1 #define IOCBN2 IOCBNbits.IOCBN2 // bit 2 #define IOCBN3 IOCBNbits.IOCBN3 // bit 3 #define IOCBN4 IOCBNbits.IOCBN4 // bit 4 #define IOCBN5 IOCBNbits.IOCBN5 // bit 5 #define IOCBN6 IOCBNbits.IOCBN6 // bit 6 #define IOCBN7 IOCBNbits.IOCBN7 // bit 7 #define IOCBP0 IOCBPbits.IOCBP0 // bit 0 #define IOCBP1 IOCBPbits.IOCBP1 // bit 1 #define IOCBP2 IOCBPbits.IOCBP2 // bit 2 #define IOCBP3 IOCBPbits.IOCBP3 // bit 3 #define IOCBP4 IOCBPbits.IOCBP4 // bit 4 #define IOCBP5 IOCBPbits.IOCBP5 // bit 5 #define IOCBP6 IOCBPbits.IOCBP6 // bit 6 #define IOCBP7 IOCBPbits.IOCBP7 // bit 7 #define LATA0 LATAbits.LATA0 // bit 0 #define LATA1 LATAbits.LATA1 // bit 1 #define LATA2 LATAbits.LATA2 // bit 2 #define LATA3 LATAbits.LATA3 // bit 3 #define LATA4 LATAbits.LATA4 // bit 4 #define LATA6 LATAbits.LATA6 // bit 6 #define LATA7 LATAbits.LATA7 // bit 7 #define LATB0 LATBbits.LATB0 // bit 0 #define LATB1 LATBbits.LATB1 // bit 1 #define LATB2 LATBbits.LATB2 // bit 2 #define LATB3 LATBbits.LATB3 // bit 3 #define LATB4 LATBbits.LATB4 // bit 4 #define LATB5 LATBbits.LATB5 // bit 5 #define LATB6 
LATBbits.LATB6 // bit 6 #define LATB7 LATBbits.LATB7 // bit 7 #define MDCH0 MDCARHbits.MDCH0 // bit 0 #define MDCH1 MDCARHbits.MDCH1 // bit 1 #define MDCH2 MDCARHbits.MDCH2 // bit 2 #define MDCH3 MDCARHbits.MDCH3 // bit 3 #define MDCHSYNC MDCARHbits.MDCHSYNC // bit 5 #define MDCHPOL MDCARHbits.MDCHPOL // bit 6 #define MDCHODIS MDCARHbits.MDCHODIS // bit 7 #define MDCL0 MDCARLbits.MDCL0 // bit 0 #define MDCL1 MDCARLbits.MDCL1 // bit 1 #define MDCL2 MDCARLbits.MDCL2 // bit 2 #define MDCL3 MDCARLbits.MDCL3 // bit 3 #define MDCLSYNC MDCARLbits.MDCLSYNC // bit 5 #define MDCLPOL MDCARLbits.MDCLPOL // bit 6 #define MDCLODIS MDCARLbits.MDCLODIS // bit 7 #define MDBIT MDCONbits.MDBIT // bit 0 #define MDOUT MDCONbits.MDOUT // bit 3 #define MDOPOL MDCONbits.MDOPOL // bit 4 #define MDSLR MDCONbits.MDSLR // bit 5 #define MDOE MDCONbits.MDOE // bit 6 #define MDEN MDCONbits.MDEN // bit 7 #define MDMS0 MDSRCbits.MDMS0 // bit 0 #define MDMS1 MDSRCbits.MDMS1 // bit 1 #define MDMS2 MDSRCbits.MDMS2 // bit 2 #define MDMS3 MDSRCbits.MDMS3 // bit 3 #define MDMSODIS MDSRCbits.MDMSODIS // bit 7 #define PS0 OPTION_REGbits.PS0 // bit 0 #define PS1 OPTION_REGbits.PS1 // bit 1 #define PS2 OPTION_REGbits.PS2 // bit 2 #define PSA OPTION_REGbits.PSA // bit 3 #define TMR0SE OPTION_REGbits.TMR0SE // bit 4, shadows bit in OPTION_REGbits #define T0SE OPTION_REGbits.T0SE // bit 4, shadows bit in OPTION_REGbits #define TMR0CS OPTION_REGbits.TMR0CS // bit 5, shadows bit in OPTION_REGbits #define T0CS OPTION_REGbits.T0CS // bit 5, shadows bit in OPTION_REGbits #define INTEDG OPTION_REGbits.INTEDG // bit 6 #define NOT_WPUEN OPTION_REGbits.NOT_WPUEN // bit 7 #define SCS0 OSCCONbits.SCS0 // bit 0 #define SCS1 OSCCONbits.SCS1 // bit 1 #define IRCF0 OSCCONbits.IRCF0 // bit 3 #define IRCF1 OSCCONbits.IRCF1 // bit 4 #define IRCF2 OSCCONbits.IRCF2 // bit 5 #define IRCF3 OSCCONbits.IRCF3 // bit 6 #define SPLLEN OSCCONbits.SPLLEN // bit 7 #define HFIOFS OSCSTATbits.HFIOFS // bit 0 #define LFIOFR OSCSTATbits.LFIOFR 
// bit 1 #define MFIOFR OSCSTATbits.MFIOFR // bit 2 #define HFIOFL OSCSTATbits.HFIOFL // bit 3 #define HFIOFR OSCSTATbits.HFIOFR // bit 4 #define OSTS OSCSTATbits.OSTS // bit 5 #define PLLR OSCSTATbits.PLLR // bit 6 #define T1OSCR OSCSTATbits.T1OSCR // bit 7 #define TUN0 OSCTUNEbits.TUN0 // bit 0 #define TUN1 OSCTUNEbits.TUN1 // bit 1 #define TUN2 OSCTUNEbits.TUN2 // bit 2 #define TUN3 OSCTUNEbits.TUN3 // bit 3 #define TUN4 OSCTUNEbits.TUN4 // bit 4 #define TUN5 OSCTUNEbits.TUN5 // bit 5 #define NOT_BOR PCONbits.NOT_BOR // bit 0 #define NOT_POR PCONbits.NOT_POR // bit 1 #define NOT_RI PCONbits.NOT_RI // bit 2 #define NOT_RMCLR PCONbits.NOT_RMCLR // bit 3 #define STKUNF PCONbits.STKUNF // bit 6 #define STKOVF PCONbits.STKOVF // bit 7 #define TMR1IE PIE1bits.TMR1IE // bit 0 #define TMR2IE PIE1bits.TMR2IE // bit 1 #define CCP1IE PIE1bits.CCP1IE // bit 2 #define SSP1IE PIE1bits.SSP1IE // bit 3 #define TXIE PIE1bits.TXIE // bit 4 #define RCIE PIE1bits.RCIE // bit 5 #define ADIE PIE1bits.ADIE // bit 6 #define TMR1GIE PIE1bits.TMR1GIE // bit 7 #define BCL1IE PIE2bits.BCL1IE // bit 3 #define EEIE PIE2bits.EEIE // bit 4 #define C1IE PIE2bits.C1IE // bit 5 #define C2IE PIE2bits.C2IE // bit 6 #define OSFIE PIE2bits.OSFIE // bit 7 #define TMR1IF PIR1bits.TMR1IF // bit 0 #define TMR2IF PIR1bits.TMR2IF // bit 1 #define CCP1IF PIR1bits.CCP1IF // bit 2 #define SSP1IF PIR1bits.SSP1IF // bit 3 #define TXIF PIR1bits.TXIF // bit 4 #define RCIF PIR1bits.RCIF // bit 5 #define ADIF PIR1bits.ADIF // bit 6 #define TMR1GIF PIR1bits.TMR1GIF // bit 7 #define BCL1IF PIR2bits.BCL1IF // bit 3 #define EEIF PIR2bits.EEIF // bit 4 #define C1IF PIR2bits.C1IF // bit 5 #define C2IF PIR2bits.C2IF // bit 6 #define OSFIF PIR2bits.OSFIF // bit 7 #define RA0 PORTAbits.RA0 // bit 0 #define RA1 PORTAbits.RA1 // bit 1 #define RA2 PORTAbits.RA2 // bit 2 #define RA3 PORTAbits.RA3 // bit 3 #define RA4 PORTAbits.RA4 // bit 4 #define RA5 PORTAbits.RA5 // bit 5 #define RA6 PORTAbits.RA6 // bit 6 #define RA7 
PORTAbits.RA7 // bit 7 #define RB0 PORTBbits.RB0 // bit 0 #define RB1 PORTBbits.RB1 // bit 1 #define RB2 PORTBbits.RB2 // bit 2 #define RB3 PORTBbits.RB3 // bit 3 #define RB4 PORTBbits.RB4 // bit 4 #define RB5 PORTBbits.RB5 // bit 5 #define RB6 PORTBbits.RB6 // bit 6 #define RB7 PORTBbits.RB7 // bit 7 #define STR1A PSTR1CONbits.STR1A // bit 0 #define STR1B PSTR1CONbits.STR1B // bit 1 #define STR1C PSTR1CONbits.STR1C // bit 2 #define STR1D PSTR1CONbits.STR1D // bit 3 #define STR1SYNC PSTR1CONbits.STR1SYNC // bit 4 #define P1DC0 PWM1CONbits.P1DC0 // bit 0 #define P1DC1 PWM1CONbits.P1DC1 // bit 1 #define P1DC2 PWM1CONbits.P1DC2 // bit 2 #define P1DC3 PWM1CONbits.P1DC3 // bit 3 #define P1DC4 PWM1CONbits.P1DC4 // bit 4 #define P1DC5 PWM1CONbits.P1DC5 // bit 5 #define P1DC6 PWM1CONbits.P1DC6 // bit 6 #define P1RSEN PWM1CONbits.P1RSEN // bit 7 #define RX9D RCSTAbits.RX9D // bit 0 #define OERR RCSTAbits.OERR // bit 1 #define FERR RCSTAbits.FERR // bit 2 #define ADDEN RCSTAbits.ADDEN // bit 3 #define CREN RCSTAbits.CREN // bit 4 #define SREN RCSTAbits.SREN // bit 5 #define RX9 RCSTAbits.RX9 // bit 6 #define SPEN RCSTAbits.SPEN // bit 7 #define SRPR SRCON0bits.SRPR // bit 0 #define SRPS SRCON0bits.SRPS // bit 1 #define SRNQEN SRCON0bits.SRNQEN // bit 2 #define SRQEN SRCON0bits.SRQEN // bit 3 #define SRCLK0 SRCON0bits.SRCLK0 // bit 4 #define SRCLK1 SRCON0bits.SRCLK1 // bit 5 #define SRCLK2 SRCON0bits.SRCLK2 // bit 6 #define SRLEN SRCON0bits.SRLEN // bit 7 #define SRRC1E SRCON1bits.SRRC1E // bit 0 #define SRRC2E SRCON1bits.SRRC2E // bit 1 #define SRRCKE SRCON1bits.SRRCKE // bit 2 #define SRRPE SRCON1bits.SRRPE // bit 3 #define SRSC1E SRCON1bits.SRSC1E // bit 4 #define SRSC2E SRCON1bits.SRSC2E // bit 5 #define SRSCKE SRCON1bits.SRSCKE // bit 6 #define SRSPE SRCON1bits.SRSPE // bit 7 #define SSPM0 SSP1CON1bits.SSPM0 // bit 0 #define SSPM1 SSP1CON1bits.SSPM1 // bit 1 #define SSPM2 SSP1CON1bits.SSPM2 // bit 2 #define SSPM3 SSP1CON1bits.SSPM3 // bit 3 #define CKP SSP1CON1bits.CKP 
// bit 4 #define SSPEN SSP1CON1bits.SSPEN // bit 5 #define SSPOV SSP1CON1bits.SSPOV // bit 6 #define WCOL SSP1CON1bits.WCOL // bit 7 #define SEN SSP1CON2bits.SEN // bit 0 #define RSEN SSP1CON2bits.RSEN // bit 1 #define PEN SSP1CON2bits.PEN // bit 2 #define RCEN SSP1CON2bits.RCEN // bit 3 #define ACKEN SSP1CON2bits.ACKEN // bit 4 #define ACKDT SSP1CON2bits.ACKDT // bit 5 #define ACKSTAT SSP1CON2bits.ACKSTAT // bit 6 #define GCEN SSP1CON2bits.GCEN // bit 7 #define DHEN SSP1CON3bits.DHEN // bit 0 #define AHEN SSP1CON3bits.AHEN // bit 1 #define SBCDE SSP1CON3bits.SBCDE // bit 2 #define SDAHT SSP1CON3bits.SDAHT // bit 3 #define BOEN SSP1CON3bits.BOEN // bit 4 #define SCIE SSP1CON3bits.SCIE // bit 5 #define PCIE SSP1CON3bits.PCIE // bit 6 #define ACKTIM SSP1CON3bits.ACKTIM // bit 7 #define BF SSP1STATbits.BF // bit 0 #define UA SSP1STATbits.UA // bit 1 #define R_NOT_W SSP1STATbits.R_NOT_W // bit 2 #define S SSP1STATbits.S // bit 3 #define P SSP1STATbits.P // bit 4 #define D_NOT_A SSP1STATbits.D_NOT_A // bit 5 #define CKE SSP1STATbits.CKE // bit 6 #define SMP SSP1STATbits.SMP // bit 7 #define C STATUSbits.C // bit 0 #define DC STATUSbits.DC // bit 1 #define Z STATUSbits.Z // bit 2 #define NOT_PD STATUSbits.NOT_PD // bit 3 #define NOT_TO STATUSbits.NOT_TO // bit 4 #define C_SHAD STATUS_SHADbits.C_SHAD // bit 0 #define DC_SHAD STATUS_SHADbits.DC_SHAD // bit 1 #define Z_SHAD STATUS_SHADbits.Z_SHAD // bit 2 #define TMR1ON T1CONbits.TMR1ON // bit 0 #define NOT_T1SYNC T1CONbits.NOT_T1SYNC // bit 2 #define T1OSCEN T1CONbits.T1OSCEN // bit 3 #define T1CKPS0 T1CONbits.T1CKPS0 // bit 4 #define T1CKPS1 T1CONbits.T1CKPS1 // bit 5 #define TMR1CS0 T1CONbits.TMR1CS0 // bit 6 #define TMR1CS1 T1CONbits.TMR1CS1 // bit 7 #define T1GSS0 T1GCONbits.T1GSS0 // bit 0 #define T1GSS1 T1GCONbits.T1GSS1 // bit 1 #define T1GVAL T1GCONbits.T1GVAL // bit 2 #define T1GGO T1GCONbits.T1GGO // bit 3 #define T1GSPM T1GCONbits.T1GSPM // bit 4 #define T1GTM T1GCONbits.T1GTM // bit 5 #define T1GPOL 
T1GCONbits.T1GPOL // bit 6 #define TMR1GE T1GCONbits.TMR1GE // bit 7 #define T2CKPS0 T2CONbits.T2CKPS0 // bit 0 #define T2CKPS1 T2CONbits.T2CKPS1 // bit 1 #define TMR2ON T2CONbits.TMR2ON // bit 2 #define T2OUTPS0 T2CONbits.T2OUTPS0 // bit 3 #define T2OUTPS1 T2CONbits.T2OUTPS1 // bit 4 #define T2OUTPS2 T2CONbits.T2OUTPS2 // bit 5 #define T2OUTPS3 T2CONbits.T2OUTPS3 // bit 6 #define TRISA0 TRISAbits.TRISA0 // bit 0 #define TRISA1 TRISAbits.TRISA1 // bit 1 #define TRISA2 TRISAbits.TRISA2 // bit 2 #define TRISA3 TRISAbits.TRISA3 // bit 3 #define TRISA4 TRISAbits.TRISA4 // bit 4 #define TRISA5 TRISAbits.TRISA5 // bit 5 #define TRISA6 TRISAbits.TRISA6 // bit 6 #define TRISA7 TRISAbits.TRISA7 // bit 7 #define TRISB0 TRISBbits.TRISB0 // bit 0 #define TRISB1 TRISBbits.TRISB1 // bit 1 #define TRISB2 TRISBbits.TRISB2 // bit 2 #define TRISB3 TRISBbits.TRISB3 // bit 3 #define TRISB4 TRISBbits.TRISB4 // bit 4 #define TRISB5 TRISBbits.TRISB5 // bit 5 #define TRISB6 TRISBbits.TRISB6 // bit 6 #define TRISB7 TRISBbits.TRISB7 // bit 7 #define TX9D TXSTAbits.TX9D // bit 0 #define TRMT TXSTAbits.TRMT // bit 1 #define BRGH TXSTAbits.BRGH // bit 2 #define SENDB TXSTAbits.SENDB // bit 3 #define SYNC TXSTAbits.SYNC // bit 4 #define TXEN TXSTAbits.TXEN // bit 5 #define TX9 TXSTAbits.TX9 // bit 6 #define CSRC TXSTAbits.CSRC // bit 7 #define SWDTEN WDTCONbits.SWDTEN // bit 0 #define WDTPS0 WDTCONbits.WDTPS0 // bit 1 #define WDTPS1 WDTCONbits.WDTPS1 // bit 2 #define WDTPS2 WDTCONbits.WDTPS2 // bit 3 #define WDTPS3 WDTCONbits.WDTPS3 // bit 4 #define WDTPS4 WDTCONbits.WDTPS4 // bit 5 #define WPUA5 WPUAbits.WPUA5 // bit 5 #define WPUB0 WPUBbits.WPUB0 // bit 0 #define WPUB1 WPUBbits.WPUB1 // bit 1 #define WPUB2 WPUBbits.WPUB2 // bit 2 #define WPUB3 WPUBbits.WPUB3 // bit 3 #define WPUB4 WPUBbits.WPUB4 // bit 4 #define WPUB5 WPUBbits.WPUB5 // bit 5 #define WPUB6 WPUBbits.WPUB6 // bit 6 #define WPUB7 WPUBbits.WPUB7 // bit 7 #endif // #ifndef NO_BIT_DEFINES #endif // #ifndef __PIC16LF1826_H__
PinguinoIDE/pinguino-compilers
windows64/p8/share/sdcc/non-free/include/pic14/pic16lf1826.h
C
gpl-2.0
112,492
/* -*- c++ -*- ---------------------------------------------------------- LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator https://www.lammps.org/ Sandia National Laboratories Steve Plimpton, sjplimp@sandia.gov Copyright (2003) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain rights in this software. This software is distributed under the GNU General Public License. See the README file in the top-level LAMMPS directory. ------------------------------------------------------------------------- */ #ifdef KSPACE_CLASS // clang-format off KSpaceStyle(pppm/dielectric,PPPMDielectric); // clang-format on #else #ifndef LMP_PPPM_DIELECTRIC_H #define LMP_PPPM_DIELECTRIC_H #include "pppm.h" namespace LAMMPS_NS { class PPPMDielectric : public PPPM { public: PPPMDielectric(class LAMMPS *); virtual ~PPPMDielectric(); virtual void compute(int, int); double **efield; double *phi; int potflag; // 1/0 if per-atom electrostatic potential phi is needed void qsum_qsq(); protected: void slabcorr(); void fieldforce_ik(); void fieldforce_ad(); class AtomVecDielectric *avec; }; } // namespace LAMMPS_NS #endif #endif /* ERROR/WARNING messages: E: Illegal ... command Self-explanatory. Check the input script syntax and compare to the documentation for the command. You can use -echo screen as a command-line option when running LAMMPS to see the offending line. */
jeremiahyan/lammps
src/DIELECTRIC/pppm_dielectric.h
C
gpl-2.0
1,529
<?php /*************************************** *$File: app/balance/BalanceMemcache.php *$Description: *$Author: lideqiang *$Time: 2015/4/23 ****************************************/ class BalanceMemcache extends BalanceBase { }
jingwug/tester
phalcon2/improve/app/balance/BalanceMemcache.php
PHP
gpl-2.0
230
/*- * Copyright (c) 2000 Brian Somers <brian@Awfulhak.org> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD: src/usr.sbin/ppp/netgraph.h,v 1.3.26.1 2010/12/21 17:10:29 kensmith Exp $ */ struct physical; struct device; #define DEF_NGCDDELAY 5 /* Default ``set cd'' value */ extern struct device *ng_Create(struct physical *); extern struct device *ng_iov2device(int, struct physical *, struct iovec *, int *, int, int *, int *); extern unsigned ng_DeviceSize(void);
rex-xxx/mt6572_x201
external/libppp/src/netgraph.h
C
gpl-2.0
1,783
<?php App::import('Helper', 'FileUpload.FileUpload'); App::import('Helper', 'Html'); App::import('Config', 'FileUpload.file_upload_settings'); class FileUploadHelperTest extends CakeTestCase { var $FileUpload = null; function startTest(){ $this->FileUpload = new FileUploadHelper(); $this->FileUpload->Html = new HtmlHelper(); } function testImage(){ $results = $this->FileUpload->image('some_image.jpg'); $this->assertEqual('some_image.jpg', $this->FileUpload->fileName); $this->assertFalse($results); //file doesn't exist } function testDefaultSettings(){ $results = $this->FileUpload->settings; $DefaultSettings = new FileUploadSettings(); $expected = $DefaultSettings->defaults; $this->assertEqual($expected['fileModel'], $results['fileModel']); $this->assertEqual($expected['fileVar'], $results['fileVar']); $this->assertEqual($expected['allowedTypes'], $results['allowedTypes']); $this->assertEqual($expected['fields'], $results['fields']); $this->assertEqual($expected['massSave'], $results['massSave']); $this->assertEqual($expected['automatic'], $results['automatic']); //change uploadDir on the fly. $results = $this->FileUpload->image('ignore', array('uploadDir' => 'something/different')); $this->assertEqual('something/different', $this->FileUpload->settings['uploadDir']); } function endTest(){ unset($this->FileUpload); } } ?>
ingkebil/trost
site/cakephp-cakephp-ba95d56/app/plugins/file_upload/tests/cases/helpers/file_upload.test.php
PHP
gpl-2.0
1,448
/* * drivers/gpu/ion/ion.c * * Copyright (C) 2011 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/device.h> #include <linux/file.h> #include <linux/freezer.h> #include <linux/fs.h> #include <linux/anon_inodes.h> #include <linux/kthread.h> #include <linux/list.h> #include <linux/memblock.h> #include <linux/miscdevice.h> #include <linux/export.h> #include <linux/mm.h> #include <linux/mm_types.h> #include <linux/rbtree.h> #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/debugfs.h> #include <linux/dma-buf.h> #include <linux/idr.h> #include <linux/mtk_ion.h> #include "ion_priv.h" #include "compat_ion.h" #include "ion_profile.h" #define ION_DEBUG 0 #if ION_DEBUG #include <linux/ion_drv.h> #include "ion_debug.h" #include "ion_debug_db.h" #include <linux/kallsyms.h> #include <linux/module.h> #define ION_DEBUG_INFO KERN_DEBUG #define ION_DEBUG_TRACE KERN_DEBUG #define ION_DEBUG_ERROR KERN_ERR #define ION_DEBUG_WARN KERN_WARNING extern struct mutex buffer_lifecycle_mutex; #endif #define DEBUG_HEAP_SHRINKER #if 0 //we move it to ion_priv.h. 
so we can dump every buffer info in ion_mm_heap.c /** * struct ion_device - the metadata of the ion device node * @dev: the actual misc device * @buffers: an rb tree of all the existing buffers * @buffer_lock: lock protecting the tree of buffers * @lock: rwsem protecting the tree of heaps and clients * @heaps: list of all the heaps in the system * @user_clients: list of all the clients created from userspace */ struct ion_device { struct miscdevice dev; struct rb_root buffers; struct mutex buffer_lock; struct rw_semaphore lock; struct plist_head heaps; long (*custom_ioctl) (struct ion_client *client, unsigned int cmd, unsigned long arg); struct rb_root clients; struct dentry *debug_root; struct dentry *heaps_debug_root; struct dentry *clients_debug_root; }; /** * struct ion_client - a process/hw block local address space * @node: node in the tree of all clients * @dev: backpointer to ion device * @handles: an rb tree of all the handles in this client * @idr: an idr space for allocating handle ids * @lock: lock protecting the tree of handles * @name: used for debugging * @display_name: used for debugging (unique version of @name) * @display_serial: used for debugging (to make display_name unique) * @task: used for debugging * * A client represents a list of buffers this client may access. * The mutex stored here is used to protect both handles tree * as well as the handles themselves, and should be held while modifying either. 
*/ struct ion_client { struct rb_node node; struct ion_device *dev; struct rb_root handles; struct idr idr; struct mutex lock; const char *name; char *display_name; int display_serial; struct task_struct *task; pid_t pid; struct dentry *debug_root; }; struct ion_handle_debug { pid_t pid; pid_t tgid; unsigned int backtrace[BACKTRACE_SIZE]; unsigned int backtrace_num; }; /** * ion_handle - a client local reference to a buffer * @ref: reference count * @client: back pointer to the client the buffer resides in * @buffer: pointer to the buffer * @node: node in the client's handle rbtree * @kmap_cnt: count of times this client has mapped to kernel * @id: client-unique id allocated by client->idr * * Modifications to node, map_cnt or mapping should be protected by the * lock in the client. Other fields are never changed after initialization. */ struct ion_handle { struct kref ref; struct ion_client *client; struct ion_buffer *buffer; struct rb_node node; unsigned int kmap_cnt; int id; #if ION_RUNTIME_DEBUGGER struct ion_handle_debug dbg; #endif }; #endif #if ION_DEBUG static void ion_debug_db_create_clentry(pid_t); static void ion_debug_db_destroy_clentry(pid_t pid); static void ion_debug_create_db(struct dentry *root); #endif static int ion_debug_kern_rec(struct ion_client *client, struct ion_buffer *buffer, struct ion_handle * handle, unsigned int action, unsigned int address_type, unsigned int address, unsigned length, int fd); bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer) { return (buffer->flags & ION_FLAG_CACHED) && !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC); } bool ion_buffer_cached(struct ion_buffer *buffer) { return !!(buffer->flags & ION_FLAG_CACHED); } static inline struct page *ion_buffer_page(struct page *page) { return (struct page *)((unsigned long)page & ~(1UL)); } static inline bool ion_buffer_page_is_dirty(struct page *page) { return !!((unsigned long)page & 1UL); } static inline void ion_buffer_page_dirty(struct page **page) { *page = 
(struct page *)((unsigned long)(*page) | 1UL); } static inline void ion_buffer_page_clean(struct page **page) { *page = (struct page *)((unsigned long)(*page) & ~(1UL)); } /* this function should only be called while dev->lock is held */ static void ion_buffer_add(struct ion_device *dev, struct ion_buffer *buffer) { struct rb_node **p = &dev->buffers.rb_node; struct rb_node *parent = NULL; struct ion_buffer *entry; while (*p) { parent = *p; entry = rb_entry(parent, struct ion_buffer, node); if (buffer < entry) { p = &(*p)->rb_left; } else if (buffer > entry) { p = &(*p)->rb_right; } else { pr_err("%s: buffer already found.", __func__); BUG(); } } rb_link_node(&buffer->node, parent, p); rb_insert_color(&buffer->node, &dev->buffers); } /* this function should only be called while dev->lock is held */ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, struct ion_device *dev, unsigned long len, unsigned long align, unsigned long flags) { struct ion_buffer *buffer; struct sg_table *table; struct scatterlist *sg; int i, ret; buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL); if (!buffer) { IONMSG("%s kzalloc failed, buffer is null.\n", __func__); return ERR_PTR(-ENOMEM); } buffer->heap = heap; buffer->flags = flags; kref_init(&buffer->ref); ret = heap->ops->allocate(heap, buffer, len, align, flags); if (ret) { if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE)) goto err2; ion_heap_freelist_drain(heap, 0); ret = heap->ops->allocate(heap, buffer, len, align, flags); if (ret) goto err2; } buffer->dev = dev; buffer->size = len; table = heap->ops->map_dma(heap, buffer); if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error")) table = ERR_PTR(-EINVAL); if (IS_ERR(table)) { IONMSG("%s table is err 0x%p.\n", __func__, table); heap->ops->free(buffer); kfree(buffer); return ERR_PTR(PTR_ERR(table)); } buffer->sg_table = table; if (ion_buffer_fault_user_mappings(buffer)) { int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; struct 
scatterlist *sg; int i, j, k = 0; buffer->pages = vmalloc(sizeof(struct page *) * num_pages); if (!buffer->pages) { IONMSG("%s vamlloc failed pages is null.\n", __func__); ret = -ENOMEM; goto err1; } for_each_sg(table->sgl, sg, table->nents, i) { struct page *page = sg_page(sg); for (j = 0; j < sg->length / PAGE_SIZE; j++) buffer->pages[k++] = page++; } if (ret) goto err; } buffer->dev = dev; buffer->size = len; INIT_LIST_HEAD(&buffer->vmas); //log task pid for debug +by k.zhang { struct task_struct *task; task = current->group_leader; get_task_comm(buffer->task_comm, task); buffer->pid = task_pid_nr(task); } mutex_init(&buffer->lock); /* this will set up dma addresses for the sglist -- it is not technically correct as per the dma api -- a specific device isn't really taking ownership here. However, in practice on our systems the only dma_address space is physical addresses. Additionally, we can't afford the overhead of invalidating every allocation via dma_map_sg. The implicit contract here is that memory comming from the heaps is ready for dma, ie if it has a cached mapping that mapping has been invalidated */ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) { sg_dma_address(sg) = sg_phys(sg); #ifdef CONFIG_NEED_SG_DMA_LENGTH sg->dma_length = sg->length; #endif } mutex_lock(&dev->buffer_lock); ion_buffer_add(dev, buffer); mutex_unlock(&dev->buffer_lock); return buffer; err: heap->ops->unmap_dma(heap, buffer); heap->ops->free(buffer); err1: if (buffer->pages) vfree(buffer->pages); err2: kfree(buffer); return ERR_PTR(ret); } void ion_buffer_destroy(struct ion_buffer *buffer) { if (WARN_ON(buffer->kmap_cnt > 0)) buffer->heap->ops->unmap_kernel(buffer->heap, buffer); buffer->heap->ops->unmap_dma(buffer->heap, buffer); buffer->heap->ops->free(buffer); if (buffer->pages) vfree(buffer->pages); kfree(buffer); } static void _ion_buffer_destroy(struct kref *kref) { struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); struct ion_heap 
*heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	/* Deferred-free heaps free asynchronously from a worker thread. */
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

/* Take a reference on @buffer. */
static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

/* Drop a reference; returns nonzero if this was the last one. */
static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

/* Count one more handle referring to @buffer (for orphan accounting). */
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

/*
 * Create a client-local handle wrapping @buffer.  Takes a buffer
 * reference and bumps the buffer's handle count.  The handle is NOT yet
 * inserted into the client's idr/rb-tree (see ion_handle_add).
 */
static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle) {
		IONMSG("%s kzalloc failed handle is null.\n", __func__);
		return ERR_PTR(-ENOMEM);
	}
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

/*
 * kref release for a handle: drop any outstanding kernel mappings,
 * unlink from the client, and release the buffer reference.
 * Caller holds client->lock.
 */
static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	handle->buffer = NULL;
	handle->client = NULL;

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

/* Take a reference on @handle. */
static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

/* Drop a handle reference under client->lock (release needs the lock). */
static int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}

/*
 * Find the client's existing handle for @buffer, if any, by walking the
 * buffer-keyed rb-tree.  Caller holds client->lock.
 */
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

/* Resolve a userspace handle id to a referenced handle, or -EINVAL. */
static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);
	mutex_unlock(&client->lock);

	return handle ?
handle : ERR_PTR(-EINVAL); }

/* True if @handle is still registered in @client's idr.  Caller holds
 * client->lock. */
static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return (idr_find(&client->idr, handle->id) == handle);
}

/*
 * Assign an idr id to @handle and insert it into the client's
 * buffer-keyed rb-tree.  Caller holds client->lock.
 */
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0) {
		IONMSG("%s idr_alloc failed id = %d.\n", __func__, id);
		return id;
	}

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

/*
 * Core allocation path shared by the kernel API and the ioctl.
 * Walks the heap list in priority order, creates a buffer and wraps it
 * in a new handle for @client.  Returns the handle or an ERR_PTR.
 */
struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
			       size_t align, unsigned int heap_id_mask,
			       unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len) {
		IONMSG("%s len cannot be zero.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* Reject requests above 1GB to avoid oversized sg-table setup. */
	if ((len > 1024*1024*1024)) {
		IONMSG("%s error: size (%zu) is more than 1G !!\n", __FUNCTION__, len);
		return ERR_PTR(-EINVAL);
	}

	MMProfileLogEx(ION_MMP_Events[PROFILE_ALLOC], MMProfileFlagStart, len, 0);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	/* buffer stays NULL when no heap matched heap_id_mask at all. */
	if (buffer == NULL) {
		IONMSG("%s buffer is null.\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		IONMSG("%s buffer is error 0x%p.\n", __func__, buffer);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle)) {
		IONMSG("%s handle is error 0x%p.\n", __func__, handle);
		return handle;
	}

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
		IONMSG("%s ion handle add failed %d.\n", __func__, ret);
	}

	/*
	 * NOTE(review): if ion_handle_add failed above, ion_handle_put may
	 * have dropped the last buffer reference, making this buffer->size
	 * read a potential use-after-free — confirm and log before the put.
	 */
	MMProfileLogEx(ION_MMP_Events[PROFILE_ALLOC], MMProfileFlagEnd, buffer->size, 0);

	return handle;
}

/* Public kernel-API wrapper: allocates and records a debug entry. */
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle * handle;

	handle = __ion_alloc(client, len, align, heap_id_mask, flags);
	if (IS_ERR_OR_NULL(handle)) {
		IONMSG("%s handle is error 0x%p", __func__, handle);
		return handle;
	}
	ion_debug_kern_rec(client, handle->buffer, handle, ION_FUNCTION_ALLOC, 0, 0, 0, 0);
	return handle;
}
EXPORT_SYMBOL(ion_alloc);

/*
 * Shared free path; @from_kern distinguishes kernel callers (which get a
 * debug record) from the ioctl path.
 */
void __ion_free(struct ion_client
*client, struct ion_handle *handle, int from_kern)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	if (from_kern)
		ion_debug_kern_rec(client, handle->buffer, NULL, ION_FUNCTION_FREE, 0, 0, 0, 0);
	ion_handle_put(handle);
}

/* Public kernel-API free: drops the client's reference on @handle. */
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	return __ion_free(client, handle, 1);
}
EXPORT_SYMBOL(ion_free);

/*
 * Query the physical address and length of @handle's buffer via the
 * owning heap's phys op.  Returns 0 on success, -EINVAL for a stale
 * handle, -ENODEV when the heap has no phys op.
 */
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	MMProfileLogEx(ION_MMP_Events[PROFILE_GET_PHYS], MMProfileFlagStart,
		       (unsigned long)client, (unsigned long)handle);

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		IONMSG("%s invalid handle pass to phys.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);

	MMProfileLogEx(ION_MMP_Events[PROFILE_GET_PHYS], MMProfileFlagEnd, buffer->size, *addr);

	return ret;
}
EXPORT_SYMBOL(ion_phys);

/*
 * Get (or create) the buffer's kernel mapping, counting users in
 * buffer->kmap_cnt.  Caller holds buffer->lock.
 */
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
			"heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr)) {
		IONMSG("%s map kernel is failed addr = 0x%p.\n", __func__, vaddr);
		return vaddr;
	}
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

/*
 * Per-handle kernel-map counter layered over the per-buffer one.
 * Caller holds buffer->lock.
 */
static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr)) {
		IONMSG("%s vadd is error 0x%p.\n", __func__, vaddr);
		return vaddr;
	}
	handle->kmap_cnt++;
	return vaddr;
}

/* Drop one buffer kmap user; unmap when the count reaches zero. */
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		MMProfileLogEx(ION_MMP_Events[PROFILE_UNMAP_KERNEL], MMProfileFlagStart, buffer->size, 0);
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		MMProfileLogEx(ION_MMP_Events[PROFILE_UNMAP_KERNEL], MMProfileFlagEnd, buffer->size, 0);
		buffer->vaddr = NULL;
	}
}

/* Drop one handle kmap user; forward to the buffer when it hits zero. */
static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

/*
 * Map @handle's buffer into the kernel address space.  Returns the
 * virtual address or an ERR_PTR.  Pair with ion_unmap_kernel().
 */
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	ion_debug_kern_rec(client, handle->buffer, handle, ION_FUNCTION_MMAP,
			ADDRESS_KERNEL_VIRTUAL, (unsigned long)vaddr, handle->buffer->size, 0);

	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

/* Release a kernel mapping taken with ion_map_kernel(). */
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	ion_debug_kern_rec(client, handle->buffer, handle, ION_FUNCTION_MUNMAP,
			ADDRESS_KERNEL_VIRTUAL, (unsigned long)handle->buffer->vaddr,
			handle->buffer->size, 0);

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

/*
 * debugfs: dump every handle held by one client, then a per-heap size
 * summary.
 */
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	seq_printf(s, "%16.s %8.s %8.s %8.s %8.s %8.s\n",
		   "heap_name", "pid", "size", "handle_count", "handle", "buffer");

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
		/* One detail row per handle. */
		struct ion_buffer *buffer = handle->buffer;

		seq_printf(s, "%16.s %3d %8zu %3d %p %p.\n",
			   buffer->heap->name, client->pid, buffer->size,
			   buffer->handle_count, handle, buffer);
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "----------------------------------------------------\n");
	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * Next free "-N" suffix for a client debugfs name: one past the highest
 * serial among existing clients with the same name.
 */
static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

/*
 * Create a client for @dev.  Records the opening task (unless it is a
 * kernel thread), registers the client in the device rb-tree and creates
 * its debugfs entry.  Returns the client or ERR_PTR(-EINVAL/-ENOMEM).
 */
struct ion_client *__ion_client_create(struct ion_device *dev,
				       const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}

	/* Insert into the device's pointer-keyed client rb-tree. */
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
			path, client->display_name);
	}

	up_write(&dev->lock);

#if ION_DEBUG
	ion_debug_db_create_clentry(client->pid);
#endif
	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
/* Public kernel-API wrapper: creates a client and records a debug entry. */
struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;

	client = __ion_client_create(dev, name);
	if (IS_ERR_OR_NULL(client)) {
		IONMSG("%s client is error or null 0x%p.\n", __func__, client);
		return client;
	}
	ion_debug_kern_rec(client, NULL, NULL, ION_FUNCTION_CREATE_CLIENT, 0, 0, 0, 0);
	return client;
}
EXPORT_SYMBOL(ion_client_create);

/*
 * Tear down a client: force-release every remaining handle (logging each
 * leak), unlink from the device and free all client resources.
 */
void __ion_client_destroy(struct ion_client *client, int from_kern)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		mutex_lock(&client->lock);
		IONMSG("warning: release handle @ client destory: handle=%p, buf=%p, ref=%d, size=%zu, kmap=%d\n",
			handle, handle->buffer, atomic_read(&handle->buffer->ref.refcount), handle->buffer->size, handle->buffer->kmap_cnt);
		ion_handle_destroy(&handle->ref);
		mutex_unlock(&client->lock);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
#if ION_DEBUG
	if (from_kern)
		ion_debug_kern_rec(client, NULL, NULL, ION_FUNCTION_DESTROY_CLIENT, 0, 0, 0, 0);
	ion_debug_db_destroy_clentry(client->pid);
#endif
	kfree(client);
}

/* Public kernel-API client destructor. */
void ion_client_destroy(struct ion_client *client)
{
	__ion_client_destroy(client, 1);
}
EXPORT_SYMBOL(ion_client_destroy);

/* Return the buffer's sg_table (owned by the buffer; do not free). */
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

/* dma-buf op: sync dirty pages for the attaching device, hand back the
 * buffer's sg_table. */
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

/* dma-buf op: nothing to undo — the sg_table is owned by the buffer. */
static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

/* Flush/sync a single page toward the device via a one-entry sg list. */
void ion_pages_sync_for_device(struct device *dev, struct page *page,
		size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

/* One entry per live userspace mapping of a fault-mapped buffer. */
struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

/*
 * For fault-mapped buffers: sync every dirty page to the device and zap
 * all userspace mappings so the next CPU touch re-faults (and re-dirties).
 */
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

/*
 * Page fault handler for fault-mapped buffers: mark the page dirty and
 * insert its pfn into the faulting vma.
 */
static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret) {
		IONMSG("%s vm insert pfn failed, vma = 0x%p, addr = 0x%p, pfn = %lu.\n", __func__, vma, vmf->virtual_address, pfn);
		return VM_FAULT_ERROR;
	}

	return VM_FAULT_NOPAGE;
}

/* Track a new vma referencing the buffer (for later zap on sync). */
static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list) {
		IONMSG("%s kmalloc failed, vma_list is null.\n", __func__);
		return;
	}
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

/* Remove the vma's tracking entry when the mapping goes away. */
static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

/*
 * dma-buf mmap op.  Fault-mapped buffers get per-page faulting via
 * ion_vma_ops; everything else is mapped eagerly by the heap.
 */
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	MMProfileLogEx(ION_MMP_Events[PROFILE_MAP_USER], MMProfileFlagStart, buffer->size, vma->vm_start);

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping "
		       "to userspace\n", __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
							VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	/* Uncached buffers use writecombine (not pgprot_noncached) here. */
	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	MMProfileLogEx(ION_MMP_Events[PROFILE_MAP_USER], MMProfileFlagEnd, buffer->size, vma->vm_start);

	return ret;
}

/* dma-buf release op: drop the reference taken at export time. */
static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

/* dma-buf kmap op: relies on begin_cpu_access having set buffer->vaddr. */
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
	return;
}

/* dma-buf op: establish the kernel mapping for CPU access. */
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	return 0;
}

/* dma-buf op: release the kernel mapping taken by begin_cpu_access. */
static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

/*
 * Export @handle's buffer as a dma-buf.  The dma-buf holds its own
 * buffer reference, released in ion_dma_buf_release().
 */
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		IONMSG("%s dma buf export failed dmabuf is error 0x%p.\n", __func__, dmabuf);
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

/* Export to a dma-buf and install it as a file descriptor. */
int __ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle, int from_kern)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf)) {
		IONMSG("%s dmabuf is err 0x%p.\n", __func__, dmabuf);
		return PTR_ERR(dmabuf);
	}

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0) {
		IONMSG("%s dma_buf_fd failed %d.\n", __func__, fd);
		dma_buf_put(dmabuf);
	}
#if ION_DEBUG
	if (from_kern)
		ion_debug_kern_rec(client, handle->buffer, handle, ION_FUNCTION_SHARE, 0, 0, 0, fd);
#endif

	return fd;
}

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	return __ion_share_dma_buf_fd(client, handle, 1);
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

/*
 * Import an ion dma-buf fd as a handle for @client.  Reuses an existing
 * handle for the same buffer when possible; rejects dma-bufs exported by
 * anyone other than this driver.
 */
struct ion_handle *__ion_import_dma_buf(struct ion_client *client, int fd, int from_kern)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	MMProfileLogEx(ION_MMP_Events[PROFILE_IMPORT], MMProfileFlagStart, 1, 1);

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf)) {
		IONMSG("%s dma_buf_get fail fd=%d ret=0x%p\n", __func__, fd, dmabuf);
		return ERR_PTR(PTR_ERR(dmabuf));
	}
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		IONMSG("%s handle is error 0x%p.\n", __func__, handle);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
		IONMSG("ion_import: ion_handle_add fail %d\n", ret);
	}

end:
	dma_buf_put(dmabuf);

#if ION_DEBUG
	if (!IS_ERR_OR_NULL(handle) && from_kern)
		ion_debug_kern_rec(client, handle->buffer, handle, ION_FUNCTION_IMPORT, 0, 0, 0, 0);
#endif

	MMProfileLogEx(ION_MMP_Events[PROFILE_IMPORT], MMProfileFlagEnd, 1, 1);
	return handle;
}

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	return __ion_import_dma_buf(client, fd, 1);
}
EXPORT_SYMBOL(ion_import_dma_buf);

/* ION_IOC_SYNC backend: sync an ion dma-buf's pages for device access. */
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf)) {
		IONMSG("%s dma_buf_get failed dmabuf is err %d, 0x%p.\n",
			__func__, fd, dmabuf);
		return PTR_ERR(dmabuf);
	}

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}

/*
 * Main ioctl dispatcher for /dev/ion.  Copies the argument union in/out
 * according to the (corrected) direction bits and routes each command to
 * the matching core function.  A freshly allocated handle is freed again
 * if the result cannot be copied back to userspace.
 */
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data)) {
		IONMSG("ion_ioctl cmd = %d, _IOC_SIZE(cmd) = %d, sizeof(data) = %zd.\n", cmd, _IOC_SIZE(cmd), sizeof(data));
		return -EINVAL;
	}

	if (dir & _IOC_WRITE) {
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) {
			IONMSG("ion_ioctl copy_from_user fail!. cmd = %d, n = %d.\n", cmd, _IOC_SIZE(cmd));
			return -EFAULT;
		}
	}

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = __ion_alloc(client, data.allocation.len,
						data.allocation.align,
						data.allocation.heap_id_mask,
						data.allocation.flags);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			IONMSG("ION_IOC_ALLOC handle is invalid. ret = %d.\n", ret);
			return ret;
		}

		data.allocation.handle = handle->id;

		/* Remember it so a failed copy_to_user can undo the alloc. */
		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			IONMSG("ION_IOC_FREE handle is invalid. handle = %d, ret = %d.\n", data.handle.handle, ret);
			return ret;
		}
		__ion_free(client, handle, 0);
		ion_handle_put(handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			IONMSG("ION_IOC_SHARE handle is invalid. handle = %d, ret = %d.\n", data.handle.handle, ret);
			return ret;
		}
		data.fd.fd = __ion_share_dma_buf_fd(client, handle, 0);
		ion_handle_put(handle);
		if (data.fd.fd < 0) {
			IONMSG("ION_IOC_SHARE fd = %d.\n", data.fd.fd);
			ret = data.fd.fd;
		}
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = __ion_import_dma_buf(client, data.fd.fd, 0);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			IONMSG("ion_import fail: fd=%d, ret=%d\n", data.fd.fd, ret);
		} else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl) {
			IONMSG("ION_IOC_CUSTOM dev has no custom ioctl!.\n");
			return -ENOTTY;
		}
		ret = dev->custom_ioctl(client, data.custom.cmd,
						data.custom.arg);
		break;
	}
	default:
	{
		IONMSG("ion_ioctl : No such command!! 0x%x\n", cmd);
		return -ENOTTY;
	}
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			IONMSG("ion_ioctl copy_to_user fail! cmd = %d, n = %d.\n", cmd, _IOC_SIZE(cmd));
			return -EFAULT;
		}
	}
	return ret;
}

/* file op: destroy the per-fd client on close. */
static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	__ion_client_destroy(client, 0);
	return 0;
}

/* file op: create a per-fd client named after the opening pid. */
static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = __ion_client_create(dev, debug_name);
	if (IS_ERR(client)) {
		IONMSG("%s ion client create failed 0x%p.\n", __func__, client);
		return PTR_ERR(client);
	}
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl   = compat_ion_ioctl,
};

/* Sum the sizes of @client's handles that live on heap @id. */
static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

/*
 * debugfs: per-heap usage report — per-client totals followed by
 * orphaned-buffer accounting.
 */
static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16.s(%16.s) %16.s %16.s %s\n", "client", "dbg_name", "pid", "size", "address");
	seq_printf(s, "----------------------------------------------------\n");

	down_read(&dev->lock);
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
char task_comm[TASK_COMM_LEN]; get_task_comm(task_comm, client->task); seq_printf(s, "%16.s(%16.s) %16u %16zu 0x%p\n", task_comm, client->dbg_name, client->pid, size, client); } else { seq_printf(s, "%16.s(%16.s) %16u %16zu 0x%p\n", client->name, "from_kernel", client->pid, size, client); } } up_read(&dev->lock); seq_printf(s, "----------------------------------------------------\n"); seq_printf(s, "orphaned allocations (info is from last known client):" "\n"); mutex_lock(&dev->buffer_lock); for (n = rb_first(&dev->buffers); n; n = rb_next(n)) { struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, node); if (buffer->heap->id != heap->id) continue; total_size += buffer->size; if (!buffer->handle_count) { seq_printf(s, "%16.s %16u %16zu %d %d\n", buffer->task_comm, buffer->pid, buffer->size, buffer->kmap_cnt, atomic_read(&buffer->ref.refcount)); total_orphaned_size += buffer->size; } } mutex_unlock(&dev->buffer_lock); seq_printf(s, "----------------------------------------------------\n"); seq_printf(s, "%16.s %16zu\n", "total orphaned", total_orphaned_size); seq_printf(s, "%16.s %16zu\n", "total ", total_size); if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) seq_printf(s, "%16.s %16zu\n", "deferred free", heap->free_list_size); seq_printf(s, "----------------------------------------------------\n"); if (heap->debug_show) heap->debug_show(heap, s, unused); return 0; } static int ion_debug_heap_open(struct inode *inode, struct file *file) { return single_open(file, ion_debug_heap_show, inode->i_private); } static const struct file_operations debug_heap_fops = { .open = ion_debug_heap_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int ion_debug_heap_pool_show(struct seq_file *s, void *unused) { struct ion_heap *heap = s->private; struct ion_device *dev = heap->dev; struct rb_node *n; size_t total_size = heap->ops->page_pool_total(heap); seq_printf(s, "%16.s %16zu\n", "total_in_pool ", total_size); return 0; } static int 
ion_debug_heap_pool_open(struct inode *inode, struct file *file) { return single_open(file, ion_debug_heap_pool_show, inode->i_private); } static const struct file_operations debug_heap_pool_fops = { .open = ion_debug_heap_pool_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #ifdef DEBUG_HEAP_SHRINKER static int debug_shrink_set(void *data, u64 val) { struct ion_heap *heap = data; struct shrink_control sc; int objs; sc.gfp_mask = -1; sc.nr_to_scan = 0; if (!val) { IONMSG("%s val cannot be zero.\n", __func__); return 0; } objs = heap->shrinker.shrink(&heap->shrinker, &sc); sc.nr_to_scan = objs; heap->shrinker.shrink(&heap->shrinker, &sc); return 0; } static int debug_shrink_get(void *data, u64 *val) { struct ion_heap *heap = data; struct shrink_control sc; int objs; sc.gfp_mask = -1; sc.nr_to_scan = 0; objs = heap->shrinker.shrink(&heap->shrinker, &sc); *val = objs; return 0; } DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get, debug_shrink_set, "%llu\n"); #endif void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) { struct dentry *debug_file; if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma || !heap->ops->unmap_dma) pr_err("%s: can not add heap with invalid ops struct.\n", __func__); if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) ion_heap_init_deferred_free(heap); if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) ion_heap_init_shrinker(heap); heap->dev = dev; down_write(&dev->lock); /* use negative heap->id to reverse the priority -- when traversing the list later attempt higher id numbers first */ plist_node_init(&heap->node, -heap->id); plist_add(&heap->node, &dev->heaps); debug_file = debugfs_create_file(heap->name, 0664, dev->heaps_debug_root, heap, &debug_heap_fops); if (!debug_file) { char buf[256], *path; path = dentry_path(dev->heaps_debug_root, buf, 256); pr_err("Failed to create heap debugfs at %s/%s\n", path, heap->name); } #ifdef DEBUG_HEAP_SHRINKER if 
(heap->shrinker.shrink) { char debug_name[64]; snprintf(debug_name, 64, "%s_shrink", heap->name); debug_file = debugfs_create_file( debug_name, 0644, dev->heaps_debug_root, heap, &debug_shrink_fops); if (!debug_file) { char buf[256], *path; path = dentry_path(dev->heaps_debug_root, buf, 256); pr_err("Failed to create heap shrinker debugfs at %s/%s\n", path, debug_name); } } #endif char tmp_name[64]; snprintf(tmp_name, 64, "%s_total_in_pool", heap->name); debug_file = debugfs_create_file( tmp_name, 0644, dev->heaps_debug_root, heap, &debug_heap_pool_fops); if (!debug_file) { char buf[256], *path; path = dentry_path(dev->heaps_debug_root, buf, 256); pr_err("Failed to create heap page pool debugfs at %s/%s\n", path, tmp_name); } up_write(&dev->lock); } struct ion_device *ion_device_create(long (*custom_ioctl) (struct ion_client *client, unsigned int cmd, unsigned long arg)) { struct ion_device *idev; int ret; idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); if (!idev) { IONMSG("%s kzalloc failed idev is null.\n", __func__); return ERR_PTR(-ENOMEM); } idev->dev.minor = MISC_DYNAMIC_MINOR; idev->dev.name = "ion"; idev->dev.fops = &ion_fops; idev->dev.parent = NULL; ret = misc_register(&idev->dev); if (ret) { pr_err("ion: failed to register misc device.\n"); return ERR_PTR(ret); } idev->debug_root = debugfs_create_dir("ion", NULL); if (!idev->debug_root) { pr_err("ion: failed to create debugfs root directory.\n"); goto debugfs_done; } idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root); if (!idev->heaps_debug_root) { pr_err("ion: failed to create debugfs heaps directory.\n"); goto debugfs_done; } idev->clients_debug_root = debugfs_create_dir("clients", idev->debug_root); if (!idev->clients_debug_root) pr_err("ion: failed to create debugfs clients directory.\n"); debugfs_done: idev->custom_ioctl = custom_ioctl; idev->buffers = RB_ROOT; mutex_init(&idev->buffer_lock); init_rwsem(&idev->lock); plist_head_init(&idev->heaps); idev->clients = RB_ROOT; 
#if ION_DEBUG
	/* Create ION Debug DB Root */
	ion_debug_create_db(idev->debug_root);
#endif
	return idev;
}

/* Unregister the misc device and free the device struct. */
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

/*
 * Boot-time carveout: for each platform heap, either reserve its
 * fixed base or allocate one from memblock (size == 0 entries are
 * skipped).  Failures are logged and the heap is skipped.
 */
void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			/* No fixed address: let memblock pick one. */
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for "
				       "heap %d\n",
					__func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
					       data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}

//============================================================================
// helper functions
//============================================================================

/*
 * Resolve a handle for either a kernel-side or userspace caller.
 *
 * from_kernel != 0: validate the caller-supplied kernel_handle against
 * the client (under client->lock) and take a reference on it.
 * from_kernel == 0: look the handle up by its userspace id.
 * Returns the referenced handle or ERR_PTR(-EINVAL).
 * The caller owns the reference and must drop it when done.
 *
 * NOTE(review): the userspace branch checks `!handle`, which assumes
 * ion_handle_get_by_id() returns NULL on failure -- some ION versions
 * return ERR_PTR instead; confirm against this tree's implementation.
 */
struct ion_handle* ion_drv_get_handle(struct ion_client* client,
				      int user_handle,
				      struct ion_handle* kernel_handle,
				      int from_kernel)
{
	struct ion_handle* handle;

	if (from_kernel) {
		handle = kernel_handle;
		if (IS_ERR_OR_NULL(handle)) {
			IONMSG("%s handle invalid, handle = 0x%p.\n",
			       __FUNCTION__, handle);
			return ERR_PTR(-EINVAL);
		}
		mutex_lock(&client->lock);
		if (!ion_handle_validate(client, handle)) {
			IONMSG("%s handle invalid, handle=0x%p\n",
			       __FUNCTION__, handle);
			mutex_unlock(&client->lock);
			return ERR_PTR(-EINVAL);
		}
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
	} else {
		handle = ion_handle_get_by_id(client, user_handle);
		if (!handle) {
			IONMSG("%s handle invalid, handle_id=%d\n",
			       __FUNCTION__, user_handle);
			return ERR_PTR(-EINVAL);
		}
	}
	return handle;
}

int ion_drv_put_kernel_handle(void
*kernel_handle) { return ion_handle_put(kernel_handle); } int ion_device_destory_heaps(struct ion_device *dev, int need_lock) { struct ion_heap *heap, *tmp; int i; if(need_lock) down_write(&dev->lock); plist_for_each_entry_safe(heap, tmp, &dev->heaps, node) { plist_del(heap, &dev->heaps); ion_heap_destroy(heap); } if(need_lock) up_write(&dev->lock); return 0; } struct ion_heap * ion_drv_get_heap(struct ion_device *dev, int heap_id, int need_lock) { struct ion_heap *_heap, *heap = NULL, *tmp; int i; if(need_lock) down_write(&dev->lock); plist_for_each_entry_safe(_heap, tmp, &dev->heaps, node) { if(_heap->id == heap_id) { heap = _heap; break; } } if(need_lock) up_write(&dev->lock); return heap; } //============================================================================================= #if ION_DEBUG static int ion_debug_kern_rec(struct ion_client *client, struct ion_buffer *buffer, struct ion_handle * handle, unsigned int action, unsigned int address_type, unsigned int address, unsigned length, int fd) { ion_sys_record_t record_param; record_param.client = client; record_param.pid = client->pid; if(current->pid != current->tgid) { record_param.group_id = current->tgid; printk(ION_DEBUG_INFO "[KERNEL tgid is %d]\n",(unsigned int)current->tgid); } else { record_param.group_id = current->pid; } record_param.buffer = buffer; record_param.handle = handle; record_param.action = action; record_param.address_type = address_type; record_param.address = (unsigned int)address; record_param.length = length; record_param.fd = fd; record_param.backtrace_num = get_kernel_backtrace((unsigned long *)record_param.backtrace); get_kernel_symbol((unsigned long *)record_param.backtrace,record_param.backtrace_num,&(record_param.kernel_symbol[0])); record_ion_info((int)1,&record_param); return 0; } #else static int ion_debug_kern_rec(struct ion_client *client, struct ion_buffer *buffer, struct ion_handle * handle, unsigned int action, unsigned int address_type, unsigned int address, 
unsigned length, int fd)
{
	return 0;
}
#endif

#if ION_DEBUG
/*
 * ION Debug assistant function
 *
 * Walk the in-use buffer records and return the requested record list
 * (of 'type') belonging to the record that tracks 'buf'; NULL when no
 * record matches or no buffers are in use.
 */
static void *ion_get_list_from_buffer(struct ion_buffer *buf,
				      unsigned int type)
{
	struct ion_buffer_record *buf_rec = NULL;

	/* Get the inuse buffer record */
	buf_rec = ion_get_inuse_buffer_record();
	if (!buf_rec) {
		printk(KERN_WARNING "No inuse buffers!\n");
		return NULL;
	}

	/* Go through it */
	do {
		/* We only need to find out the record with corresponding buffer */
		if (buf_rec->buffer_address == buf) {
			return ion_get_list(LIST_BUFFER,buf_rec, type);
		}
		/* Next record */
		buf_rec = buf_rec->next;
	} while (!!buf_rec);

	return NULL;
}

/*
 * ION Debug assistant function
 *
 * Same lookup as above, but keyed by process id over the in-use
 * process records.  Returns NULL when the pid has no record.
 */
static void *ion_get_list_from_process(pid_t pid, unsigned int type)
{
	struct ion_process_record *process_rec = NULL;

	/* Get the inuse buffer record */
	process_rec = (struct ion_process_record *)ion_get_inuse_process_usage_record2();
	if (!process_rec) {
		printk(KERN_WARNING "No inuse process!\n");
		return NULL;
	}

	/* Go through it */
	do {
		/* We only need to find out the record with corresponding buffer */
		if (process_rec->pid == pid) {
			return ion_get_list(LIST_PROCESS,process_rec, type);
		}
		/* Next record */
		process_rec = process_rec->next;
	} while (!!process_rec);

	return NULL;
}

/*
 * ION Debug assistant function
 *
 * Find the in-use client-usage record for 'client'.  Kernel-created
 * clients are matched on group_pid, userspace ones on pid.
 * NOTE(review): pointers are compared after a cast to unsigned int --
 * only valid on 32-bit; verify before reusing on a 64-bit build.
 */
static void *ion_get_client_record(struct ion_client *client)
{
	struct ion_client_usage_record *client_rec = NULL;

	/* Get the inuse buffer record */
	client_rec = ion_get_inuse_client_record();
	if (!client_rec) {
		printk(KERN_WARNING "No inuse client!\n");
		return NULL;
	}

	/* Go through it */
	do {
		/* We only need to find out the record with corresponding buffer */
		if ((client_rec->tracking_info.from_kernel)&&(client_rec->tracking_info.recordID.client_address == (unsigned int)client) &&
		    (client_rec->tracking_info.recordID.group_pid == client->pid)) {
			return (void *)client_rec;
		} else if ((!client_rec->tracking_info.from_kernel)&&(client_rec->tracking_info.recordID.client_address == (unsigned int)client) &&
(client_rec->tracking_info.recordID.pid == client->pid)) { return (void *)client_rec; } /* Next record */ client_rec = (struct ion_client_usage_record *)client_rec->next; } while (!!client_rec); return NULL; } /* * ION Debug DB assistant function of showing backtrace */ static int ion_debugdb_show_backtrace(struct seq_file *s, struct ion_record_basic_info *ti, unsigned int sbt) { unsigned int i = 0; unsigned int backtrace_count = 0; ObjectEntry *tmp = NULL; unsigned int stringCount = KSYM_SYMBOL_LEN + 30; if (ti == NULL) { return 0; } if (sbt == ALLOCATE_BACKTRACE_INFO) { tmp = (ObjectEntry *)ti->allocate_backtrace; if (tmp == NULL) return 0; backtrace_count = tmp->numEntries; } else if (sbt == RELEASE_BACKTRACE_INFO) { tmp = (ObjectEntry *)ti->release_backtrace; if(tmp == NULL) return 0; backtrace_count = tmp->numEntries; } //printk("%s [%d] backtrace_count = (%d)\n",__FUNCTION__,__LINE__,backtrace_count); if (backtrace_count != 0) { seq_printf(s, "%19s\n", "[BACKTRACE]"); } for (i = 0;i < backtrace_count;i++) { char tmpString[stringCount]; ion_get_backtrace_info(ti, tmpString, stringCount, i, sbt); seq_printf(s, "%10s %s", "::", tmpString); } return 1; } /* * ION Debug DB file operations */ extern struct ion_device *g_ion_device; static int ion_debug_dbcl_show(struct seq_file *s, void *unused) { unsigned long key =(unsigned long) s->private; pid_t raw_key; enum dbcl_types type; struct ion_device *dev = g_ion_device; struct rb_node *cn, *hn; struct ion_client *client; struct ion_handle *handle; struct ion_buffer *buffer; int client_cnt = 0, buffer_cnt = 0; /* * Here is an introduction about how we convert key to raw_key. * * Firstly, we have following observations, * 1. Process IDs have a maximum bound of pid_max, which is rarely larger than PID_MAX_DEFAULT(0x8000). * (No-use)2. Kernel modules often have higher value than 0xbf000000 and are page-aligned. * (No-use)3. Other kernel parts often have higher value than 0xc0000000. 
* * Based on above observations, we can using following rules to change raw_key to key & vice versa. * 1. For processes, we use ((dbcl_types << 16) | raw_key) as the key, in which raw_key equals Process ID. * (No-use)2. For kernel modules, we use (raw_key | dbcl_types) as the key, in which raw_key is the virtual address the module is resident in. * (No-use)3. For other kernel parts, we use dbcl_types as the key. * */ #if 0 if (unlikely(key >= 0xbf000000)) { /* Rarely-used case */ } else if (likely(key >= 0x8000)) { type = key >> 16; raw_key = key & 0xffff; } else { /* Rarely-used case */ } #endif /* Which type */ type = key >> 16; /* Which process */ raw_key = key & 0xffff; seq_printf(s, "Process [%d]\n", raw_key); /* Which type */ switch (type) { case DBCL_CLIENT: /* Lv1 - all clients * Lv2 - all client-handles * Lv3 - all client-handle-buffers */ printk(KERN_INFO "DBCL_CLIENT\n"); { /* Lv1 - all clients */ for (cn = rb_first(&dev->clients); cn; cn = rb_next(cn)) { client = rb_entry(cn, struct ion_client, node); /* Matched clients */ if (client->pid == raw_key) { seq_printf(s, "%-8s[%2d] %12p\n", "client", client_cnt++, client); mutex_lock(&client->lock); /* Lv2 - all client-handles */ for (hn = rb_first(&client->handles); hn; hn = rb_next(hn)) { handle = rb_entry(hn, struct ion_handle, node); seq_printf(s, "%10s[%2d] kmap_cnt(%d)\n", "handle", buffer_cnt, handle->kmap_cnt); /* Lv3 - all client-handle-buffers */ buffer = handle->buffer; mutex_lock(&buffer->lock); seq_printf(s, "%10s[%2d] heap(%s) flags(%d) size(%d) kmap_cnt(%d) kvaddr(0x%x)\n", "buffer", buffer_cnt++, buffer->heap->name, (unsigned int)buffer->flags, buffer->size, (unsigned int)buffer->kmap_cnt, (unsigned int)buffer->vaddr); mutex_unlock(&buffer->lock); } mutex_unlock(&client->lock); } } } break; case DBCL_BUFFER: /* Lv1 - all buffers * Lv2 - all buffer-usage * */ printk(KERN_INFO "DBCL_BUFFER\n"); { struct ion_buffer_usage_record *usg_rec; struct ion_buffer_record *buf_rec = NULL; int buffer_count 
= 0; buf_rec = ion_get_inuse_buffer_record(); /* Find matched clients */ for (cn = rb_first(&dev->clients); cn; cn = rb_next(cn)) { client = rb_entry(cn, struct ion_client, node); /* Matched clients */ if (client->pid == raw_key) { mutex_lock(&client->lock); /* Lv1 - all buffers */ for (hn = rb_first(&client->handles); hn; hn = rb_next(hn)) { handle = rb_entry(hn, struct ion_handle, node); buffer = handle->buffer; mutex_lock(&buffer->lock); seq_printf(s, "%s[%2d] size(%d) %12p\n", "buffer", buffer_cnt++, buffer->size, buffer); mutex_unlock(&buffer->lock); /* Lv2 - all buffer-usage */ usg_rec = ion_get_list_from_buffer(buffer, BUFFER_ALLOCATION_LIST); if(usg_rec != NULL) seq_printf(s, "%s\n"," <BUFFER_ALLOCATION_LIST>"); while (!!usg_rec) { seq_printf(s, "%s [0x%x] %10s [%d] (%s [%d]) \n"," client", usg_rec->tracking_info.recordID.client_address, "Process", usg_rec->tracking_info.recordID.pid, "GroupLeader", usg_rec->tracking_info.recordID.group_pid); /* Show buffer allocation backtrace */ ion_debugdb_show_backtrace(s, &usg_rec->tracking_info, ALLOCATE_BACKTRACE_INFO); /* Next buffer usage record */ usg_rec = (struct ion_buffer_usage_record *)ion_get_data_from_record((void *)usg_rec, RECORD_NEXT); } #if 0 usg_rec = ion_get_list_from_buffer(buffer, BUFFER_FREE_LIST); if(usg_rec != NULL) seq_printf(s, "%s\n"," <BUFFER_FREE_LIST>"); while (!!usg_rec) { seq_printf(s, "%s [0x%x] %10s [%d] \n"," client", usg_rec->tracking_info.recordID.client_address, "Process", usg_rec->tracking_info.recordID.pid, "GroupLeader", usg_rec->tracking_info.recordID.group_pid); /* Show buffer free backtrace */ ion_debugdb_show_backtrace(s, &usg_rec->tracking_info, RELEASE_BACKTRACE_INFO); /* Next buffer usage record */ usg_rec = (struct ion_buffer_usage_record *)ion_get_data_from_record((void *)usg_rec, RECORD_NEXT); } seq_printf(s, "%10s\n","=================================================="); #endif } mutex_unlock(&client->lock); } } while (buf_rec != NULL) { /* Allocation */ usg_rec = 
ion_get_list(LIST_BUFFER,buf_rec, BUFFER_ALLOCATION_LIST); while ((!!usg_rec) &&(usg_rec->tracking_info.recordID.pid== raw_key)) { buffer_count++; if(buffer_count == 1) { seq_printf(s, "%8s[%2d] buffer: 0x%p buffer structure adr: 0x%p size(%d)\n", "buffer", buffer_cnt++, buf_rec->buffer, buf_rec->buffer_address, buf_rec->buffer->size); } seq_printf(s, "%s\n"," <BUFFER_ALLOCATION_LIST>"); seq_printf(s, "%s [0x%x] %10s [%d] (%s [%d])\n"," client", usg_rec->tracking_info.recordID.client_address, "Process", usg_rec->tracking_info.recordID.pid, "GroupLeader", usg_rec->tracking_info.recordID.group_pid); /* Show buffer allocation backtrace */ ion_debugdb_show_backtrace(s, &usg_rec->tracking_info, ALLOCATE_BACKTRACE_INFO); /* Next buffer usage record */ usg_rec = (struct ion_buffer_usage_record *)ion_get_data_from_record((void *)usg_rec, RECORD_NEXT); } buffer_count = 0; #if 0 /* Free */ usg_rec = ion_get_list(LIST_BUFFER,buf_rec, BUFFER_FREE_LIST); while ((!!usg_rec)&&(usg_rec->tracking_info.recordID.pid== raw_key)) { seq_printf(s, "%s\n"," <BUFFER_FREE_LIST>"); seq_printf(s, "%s [0x%x] %10s [%d] (%s [%d])\n"," client", usg_rec->tracking_info.recordID.client_address, "Process", usg_rec->tracking_info.recordID.pid, "GroupLeader", usg_rec->tracking_info.recordID.group_pid); /* Show buffer free backtrace */ ion_debugdb_show_backtrace(s, &usg_rec->tracking_info, RELEASE_BACKTRACE_INFO); /* Next buffer usage record */ usg_rec = (struct ion_buffer_usage_record *)ion_get_data_from_record((void *)usg_rec, RECORD_NEXT); } #endif /* Next record */ buf_rec = buf_rec->next; } } break; case DBCL_MMAP: /* Lv1 - all buffers * Lv2 - all buffer-mmaps */ printk(KERN_INFO "DBCL_MMAP\n"); { struct ion_address_usage_record *adr_rec; struct ion_client_usage_record *client_rec; /* Find matched clients */ for (cn = rb_first(&dev->clients); cn; cn = rb_next(cn)) { client = rb_entry(cn, struct ion_client, node); /* Matched clients */ if (client->pid == raw_key) { mutex_lock(&client->lock); /* Lv1 
- all buffers */ for (hn = rb_first(&client->handles); hn; hn = rb_next(hn)) { handle = rb_entry(hn, struct ion_handle, node); buffer = handle->buffer; mutex_lock(&buffer->lock); seq_printf(s, "%-8s[%2d] size(%d) %12p\n", "buffer", buffer_cnt++, buffer->size, buffer); mutex_unlock(&buffer->lock); /* Lv2 - all buffer-mmaps */ adr_rec = ion_get_list_from_buffer(buffer, ADDRESS_ALLOCATION_LIST); if(adr_rec != NULL) { seq_printf(s, "%10s\n","<ADDRESS_ALLOCATION_LIST_IN_KERNELSPACE>"); } while (!!adr_rec) { seq_printf(s, "%10s [%d] - %10s [0x%x]-[0x%x]%10s [%d]\n", "Process", adr_rec->tracking_info.recordID.pid, "Address", adr_rec->mapping_address,(adr_rec->mapping_address+adr_rec->size), "Size", adr_rec->size); /* Show address allocation backtrace */ ion_debugdb_show_backtrace(s, &adr_rec->tracking_info, ALLOCATE_BACKTRACE_INFO); /* Next address record */ adr_rec = (struct ion_address_usage_record *)ion_get_data_from_record((void *)adr_rec, RECORD_NEXT); } adr_rec = ion_get_list_from_buffer(buffer, ADDRESS_FREE_LIST); if(adr_rec != NULL) { seq_printf(s, "%10s\n","<ADDRESS_FREE_LIST_IN_KERNELSPACE>"); } while (!!adr_rec) { seq_printf(s, "%10s [%d] - %10s [0x%x]-[0x%x] %10s [%d]\n", "Process", adr_rec->tracking_info.recordID.pid, "Address", adr_rec->mapping_address,(adr_rec->mapping_address+adr_rec->size), "Size", adr_rec->size); /* Show address release backtrace */ ion_debugdb_show_backtrace(s, &adr_rec->tracking_info, RELEASE_BACKTRACE_INFO); /* Next address record */ adr_rec = (struct ion_address_usage_record *)ion_get_data_from_record((void *)adr_rec, RECORD_NEXT); } } client_rec = (struct ion_client_usage_record *)ion_get_client_record(client); if(client_rec != NULL) { adr_rec = ion_get_list_from_process(client_rec->tracking_info.recordID.pid, ADDRESS_ALLOCATION_LIST); if(adr_rec != NULL) seq_printf(s, "%10s\n","<ADDRESS_ALLOCATION_LIST_IN_USERSPACE>"); while (!!adr_rec) { seq_printf(s, "%10s [%d] - %10s [0x%x]-[0x%x] %10s [%d]\n", "Process", 
adr_rec->tracking_info.recordID.pid, "Address", adr_rec->mapping_address,(adr_rec->mapping_address+adr_rec->size), "Size", adr_rec->size); /* Show address allocation backtrace */ ion_debugdb_show_backtrace(s, &adr_rec->tracking_info, ALLOCATE_BACKTRACE_INFO); /* Next address record */ adr_rec = (struct ion_address_usage_record *)ion_get_data_from_record((void *)adr_rec, RECORD_NEXT); } adr_rec = ion_get_list_from_process(client_rec->tracking_info.recordID.pid, ADDRESS_FREE_LIST); if(adr_rec != NULL) seq_printf(s, "%10s\n","<ADDRESS_FREE_LIST_IN_USERSPACE>"); while (!!adr_rec) { seq_printf(s, "%10s [%d] - %10s [0x%x]-[0x%x] %10s [%d]\n", "Process", adr_rec->tracking_info.recordID.pid, "Address", adr_rec->mapping_address,(adr_rec->mapping_address+adr_rec->size), "Size", adr_rec->size); /* Show address release backtrace */ ion_debugdb_show_backtrace(s, &adr_rec->tracking_info, RELEASE_BACKTRACE_INFO); /* Next address record */ adr_rec = (struct ion_address_usage_record *)ion_get_data_from_record((void *)adr_rec, RECORD_NEXT); } } mutex_unlock(&client->lock); } } } break; case DBCL_FD: /* Lv1 - all buffers * Lv2 - all buffer-fds */ printk(KERN_INFO "DBCL_FD\n"); { struct ion_fd_usage_record *fd_rec; struct ion_client_usage_record *client_rec; /* Find matched clients */ for (cn = rb_first(&dev->clients); cn; cn = rb_next(cn)) { client = rb_entry(cn, struct ion_client, node); /* Matched clients */ if (client->pid == raw_key) { mutex_lock(&client->lock); /* Lv1 - all buffers */ client_rec = (struct ion_client_usage_record *)ion_get_client_record(client); //printk("[FD] client_rec %x input is %x groupd id is %d\n",client_rec,client,client->pid); if(client_rec != NULL) { //printk("[FD] client pid is %d\n",client_rec->tracking_info.recordID.pid); fd_rec = ion_get_list_from_process(client_rec->tracking_info.recordID.pid, FD_ALLOCATION_LIST); if(fd_rec != NULL) seq_printf(s, "%10s\n","<FD_ALLOCATION_LIST>"); //printk("[FD] get fd_rec %x\n",fd_rec); while (!!fd_rec) { 
seq_printf(s, "%10s [%d] %10s [%d]\n", "Process", fd_rec->tracking_info.recordID.pid, "inused fd", fd_rec->fd); /* Show address allocation backtrace */ ion_debugdb_show_backtrace(s, &fd_rec->tracking_info, ALLOCATE_BACKTRACE_INFO); /* Next address record */ fd_rec = (struct ion_fd_usage_record *)ion_get_data_from_record((void *)fd_rec, RECORD_NEXT); } #if 0 fd_rec = ion_get_list_from_process(client_rec->tracking_info.recordID.pid, FD_FREE_LIST); if(fd_rec != NULL) seq_printf(s, "%10s\n","<FD_FREE_LIST>"); while (!!fd_rec) { seq_printf(s, "%10s [%d] %10s [%d]\n", "Process", fd_rec->tracking_info.recordID.pid, "freed fd", fd_rec->fd); /* Show address release backtrace */ ion_debugdb_show_backtrace(s, &fd_rec->tracking_info, RELEASE_BACKTRACE_INFO); /* Next address record */ fd_rec = (struct ion_address_usage_record *)ion_get_data_from_record((void *)fd_rec, RECORD_NEXT); } #endif } mutex_unlock(&client->lock); } } } break; default: break; } return 0; } static int ion_debug_dbcl_open(struct inode *inode, struct file *file) { return single_open(file, ion_debug_dbcl_show, inode->i_private); } static const struct file_operations debug_dbcl_fops = { .open = ion_debug_dbcl_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int ion_debug_dbis_show(struct seq_file *s, void *unused) { unsigned long type = (unsigned long)s->private; unsigned long ori_type = type; struct ion_device *dev = g_ion_device; struct rb_node *cn, *hn; struct ion_client *client; struct ion_handle *handle; struct ion_buffer *buffer; int client_cnt = 0, buffer_cnt = 0,process_cnt = 0; struct ion_buffer_record *buf_rec = NULL; struct ion_process_record *process_rec = NULL; struct ion_client_usage_record *client_rec = NULL; /* History records */ if (type >= (unsigned long)DBIS_DIR) { printk(KERN_INFO "ION Debug History Records\n"); type -= (unsigned long)DBIS_DIR; switch ((enum dbis_types)type) { case DBIS_CLIENTS: { client_rec = ion_get_freed_client_record(); break; } case 
DBIS_BUFFERS: { buf_rec = ion_get_freed_buffer_record(); break; } case DBIS_MMAPS: { buf_rec = ion_get_freed_buffer_record(); } case DBIS_FDS: { process_rec = ion_get_freed_process_record(); break; } case DBIS_PIDS: { client_rec = ion_get_inuse_client_record(); process_rec = ion_get_inuse_process_usage_record2(); break; } case _TOTAL_DBIS: case DBIS_FILE: case DBIS_DIR: { break; } } } else { printk(KERN_INFO "ION Debug Non-History Records\n"); switch ((enum dbis_types)type) { case DBIS_CLIENTS: { client_rec = ion_get_inuse_client_record(); break; } case DBIS_BUFFERS: { buf_rec = ion_get_inuse_buffer_record(); break; } case DBIS_MMAPS: { buf_rec = ion_get_inuse_buffer_record(); } case DBIS_FDS: { process_rec = ion_get_inuse_process_usage_record2(); break; } case DBIS_PIDS: { client_rec = ion_get_inuse_client_record(); process_rec = ion_get_inuse_process_usage_record2(); break; } case _TOTAL_DBIS: case DBIS_FILE: case DBIS_DIR: { break; } } } /* Non-history records */ switch ((enum dbis_types)type) { case DBIS_CLIENTS: printk(KERN_INFO "DBIS_CLIENTS\n"); { #if 0 /* All clients */ for (cn = rb_first(&dev->clients); cn; cn = rb_next(cn)) { client = rb_entry(cn, struct ion_client, node); seq_printf(s, "\n%8s[%2d] 0x%p PID[%d]\n", "client", client_cnt++, client, client->pid); mutex_lock(&client->lock); /* All client-handles */ for (hn = rb_first(&client->handles); hn; hn = rb_next(hn)) { handle = rb_entry(hn, struct ion_handle, node); seq_printf(s, "%10s[%2d] kmap_cnt(%d)\n", "handle", buffer_cnt, handle->kmap_cnt); /* All client-handle-buffers */ buffer = handle->buffer; mutex_lock(&buffer->lock); seq_printf(s, "%10s[%2d] heap(%s) address(0x%x) flags(%d) size(%d) kmap_cnt(%d) kvaddr(0x%x)\n", "buffer", buffer_cnt++, buffer->heap->name,buffer,buffer->flags, buffer->size, buffer->kmap_cnt, buffer->vaddr); mutex_unlock(&buffer->lock); } mutex_unlock(&client->lock); buffer_cnt = 0; } #endif client_cnt = 0; while(client_rec != NULL) { seq_printf(s, "\n[%2d]%s: fd[%d] 0x%p 
PID[%d] GROUP_PID[%d]\n",client_cnt++,"client",client_rec->fd, client_rec->tracking_info.recordID.client,client_rec->tracking_info.recordID.pid,client_rec->tracking_info.recordID.group_pid); /* Show buffer allocation backtrace */ seq_printf(s, " %s\n","<CLIENT_ALLOCATION_LIST>"); ion_debugdb_show_backtrace(s, &client_rec->tracking_info,ALLOCATE_BACKTRACE_INFO); if (ori_type >= (unsigned long)DBIS_DIR) { seq_printf(s, " %s\n","<CLIENT_FREE_LIST>"); ion_debugdb_show_backtrace(s, &client_rec->tracking_info,RELEASE_BACKTRACE_INFO); } client_rec = (struct ion_client_usage_record *)client_rec->next; } } break; case DBIS_BUFFERS: printk(KERN_INFO "DBIS_BUFFERS\n"); { struct ion_buffer_usage_record *usg_rec; #if 0 buf_rec = ion_get_inuse_buffer_record(); #endif while (buf_rec != NULL) { seq_printf(s, "%8s[%2d][0x%x] buffer structure: 0x%p size(%d)\n", "buffer", buffer_cnt++,(unsigned int)buf_rec->buffer,buf_rec->buffer_address, buf_rec->buffer->size); /* Allocation */ usg_rec = ion_get_list(LIST_BUFFER,buf_rec, BUFFER_ALLOCATION_LIST); if(usg_rec) { seq_printf(s, "%30s\n","<BUFFER_ALLOCATION_LIST>"); } while (!!usg_rec) { if(usg_rec->function_type == ION_FUNCTION_ALLOC) { seq_printf(s, "%15s [%d] (%s [%d]) %s (0x%x) FUNCTION %s\n","Process", usg_rec->tracking_info.recordID.pid, "GroupLeader", usg_rec->tracking_info.recordID.group_pid,"handle",(unsigned int)usg_rec->handle,"ION_ALLOC"); }else { seq_printf(s, "%15s [%d] (%s [%d]) %s (0x%x) FUNCTION %s\n","Process", usg_rec->tracking_info.recordID.pid, "GroupLeader", usg_rec->tracking_info.recordID.group_pid,"handle",(unsigned int)usg_rec->handle,"ION_IMPORT"); } /* Show buffer allocation backtrace */ ion_debugdb_show_backtrace(s, &usg_rec->tracking_info, ALLOCATE_BACKTRACE_INFO); /* Next buffer usage record */ usg_rec = (struct ion_buffer_usage_record *)ion_get_data_from_record((void *)usg_rec, RECORD_NEXT); } #if 0 /* Free */ seq_printf(s, "%30s\n","<BUFFER_FREE_LIST>"); usg_rec = ion_get_list(LIST_BUFFER,buf_rec, 
BUFFER_FREE_LIST); while (!!usg_rec) { seq_printf(s, "%15s [%d] (%15s [%d])\n","Process", usg_rec->tracking_info.recordID.pid, "GroupLeader", usg_rec->tracking_info.recordID.group_pid); /* Show buffer free backtrace */ ion_debugdb_show_backtrace(s, &usg_rec->tracking_info, RELEASE_BACKTRACE_INFO); /* Next buffer usage record */ usg_rec = (struct ion_buffer_usage_record *)ion_get_data_from_record((void *)usg_rec, RECORD_NEXT); } #endif /* Next record */ buf_rec = buf_rec->next; } } break; case DBIS_MMAPS: printk(KERN_INFO "DBIS_MMAPS\n"); { struct ion_address_usage_record *adr_rec = NULL; struct ion_address_usage_record *adr_rec_free = NULL; struct ion_address_usage_record *adr_rec_user = NULL; struct ion_address_usage_record *adr_rec_user_free = NULL; seq_printf(s, "%8s\n","<USERSPACE MAPPING>"); while (process_rec != NULL) { /* USER MMAP */ adr_rec_user = ion_get_list(LIST_PROCESS,process_rec, ADDRESS_ALLOCATION_LIST); adr_rec_user_free = ion_get_list(LIST_PROCESS,process_rec, ADDRESS_FREE_LIST); if((adr_rec_user == NULL) && (adr_rec_user_free == NULL)) { process_rec = process_rec->next; continue; } if(process_rec == NULL) break; seq_printf(s, "[%2d]%8s[0x%x] [%d] group_id [%d]\n",process_cnt++,"process",(unsigned int)process_rec, process_rec->pid, process_rec->group_id); if(adr_rec_user != NULL) { seq_printf(s, " %s\n","<ADDRESS_ALLOCATION_LIST>"); } else { seq_printf(s, " %s\n","<NO ADDRESS_ALLOCATION_LIST>"); } while (!!adr_rec_user) { seq_printf(s, " %s[0x%x] [%d] - %s [0x%x] - [0x%x] %10s [%d]\n", "Process",(unsigned int)process_rec, adr_rec_user->tracking_info.recordID.pid, "Address", adr_rec_user->mapping_address,(adr_rec_user->mapping_address + adr_rec_user->size), "Size", adr_rec_user->size); /* Show fd allocation backtrace */ ion_debugdb_show_backtrace(s, &adr_rec_user->tracking_info, ALLOCATE_BACKTRACE_INFO); /* Next fd record */ adr_rec_user = (struct ion_address_usage_record *)ion_get_data_from_record((void *)adr_rec_user, RECORD_NEXT); } 
if(adr_rec_user_free != NULL) { seq_printf(s, " %s\n","<ADDRESS_FREE_LIST>"); } else { seq_printf(s, " %s\n","<NO_ADDRESS_FREE_LIST>"); } while (!!adr_rec_user_free) { seq_printf(s, " %s[0x%x] [%d] - %s [0x%x] - [0x%x]%10s [%d]\n", "Process",(unsigned int)process_rec, adr_rec_user_free->tracking_info.recordID.pid, "Address", adr_rec_user_free->mapping_address,(adr_rec_user_free->mapping_address + adr_rec_user_free->size), "Size", adr_rec_user_free->size); /* Show fd release backtrace */ ion_debugdb_show_backtrace(s, &adr_rec_user_free->tracking_info, RELEASE_BACKTRACE_INFO); /* Next fd record */ adr_rec_user_free = (struct ion_address_usage_record *)ion_get_data_from_record((void *)adr_rec_user_free, RECORD_NEXT); } /* Next record */ process_rec = process_rec->next; } seq_printf(s, "%s\n","<KENREL MAPPING>"); mutex_lock(&buffer_lifecycle_mutex); while (buf_rec != NULL) { mutex_lock(&buf_rec->ion_address_usage_mutex); /* Mapping */ adr_rec = ion_get_list(LIST_BUFFER,buf_rec, ADDRESS_ALLOCATION_LIST); /* Unmapping */ adr_rec_free = ion_get_list(LIST_BUFFER,buf_rec, ADDRESS_FREE_LIST); mutex_unlock(&buf_rec->ion_address_usage_mutex); if((adr_rec == NULL)&&(adr_rec_free == NULL)) { buf_rec = buf_rec->next; continue; } seq_printf(s, "%8s[%2d] size(%d) %12p\n", "buffer", buffer_cnt++, buf_rec->buffer->size, buf_rec->buffer); if(adr_rec != NULL) { seq_printf(s, " %s\n","<ADDRESS_ALLOCATION_LIST>"); } while (!!adr_rec) { seq_printf(s, "%8s [%d] - %20s [0x%x] - [0x%x] %10s [%d]\n", "Process", adr_rec->tracking_info.recordID.pid, "Address", adr_rec->mapping_address,(adr_rec_user->mapping_address + adr_rec_user->size), "Size", adr_rec->size); /* Show address allocation backtrace */ ion_debugdb_show_backtrace(s, &adr_rec->tracking_info, ALLOCATE_BACKTRACE_INFO); /* Next address record */ adr_rec = (struct ion_address_usage_record *)ion_get_data_from_record((void *)adr_rec, RECORD_NEXT); } if(adr_rec_free != NULL) { seq_printf(s, " %s\n","<ADDRESS_FREE_LIST>"); } while 
(!!adr_rec_free) { seq_printf(s, "%8s [%d] - %20s [0x%x] - [0x%x] %10s [%d]\n", "Process", adr_rec_free->tracking_info.recordID.pid, "Address", adr_rec_free->mapping_address,(adr_rec_free->mapping_address + adr_rec_free->size), "Size", adr_rec_free->size); /* Show address release backtrace */ ion_debugdb_show_backtrace(s, &adr_rec_free->tracking_info, RELEASE_BACKTRACE_INFO); /* Next address record */ adr_rec_free = (struct ion_address_usage_record *)ion_get_data_from_record((void *)adr_rec_free, RECORD_NEXT); } adr_rec = NULL; adr_rec_free = NULL; /* Next record */ buf_rec = buf_rec->next; } mutex_unlock(&buffer_lifecycle_mutex); } break; case DBIS_FDS: printk(KERN_INFO "DBIS_FDS\n"); { struct ion_fd_usage_record *fd_rec; while (process_rec != NULL) { /* FD */ fd_rec = ion_get_list(LIST_PROCESS,process_rec, FD_ALLOCATION_LIST); //fd_rec2 = ion_get_list(LIST_PROCESS,process_rec, FD_FREE_LIST); if(fd_rec == NULL) { process_rec = process_rec->next; continue; } seq_printf(s, "[%2d] %8s[0x%x] [%d] group_id [%d]\n",process_cnt++, "process",(unsigned int)process_rec, process_rec->pid,process_rec->group_id); if(fd_rec != NULL) { seq_printf(s, " %s\n","<FD_ALLOCATION_LIST>"); } else { seq_printf(s, " %s\n","<NO_FD_ALLOCATION_LIST>"); } while (!!fd_rec) { seq_printf(s, " %8s[0x%x] [%d] - %8s [%d]\n", "Process",(unsigned int)process_rec, fd_rec->tracking_info.recordID.pid, "inused Fd", fd_rec->fd); /* Show fd allocation backtrace */ ion_debugdb_show_backtrace(s, &fd_rec->tracking_info, ALLOCATE_BACKTRACE_INFO); /* Next fd record */ fd_rec = (struct ion_fd_usage_record *)ion_get_data_from_record((void *)fd_rec, RECORD_NEXT); } #if 0 if(fd_rec2 != NULL) { seq_printf(s, " %s\n","<FD_FREE_LIST>"); } while (!!fd_rec2) { seq_printf(s, "%7s[0x%x] [%d] - %6s [%d]\n", "Process",process_rec, fd_rec2->tracking_info.recordID.pid, "freed Fd", fd_rec2->fd); /* Show fd release backtrace */ ion_debugdb_show_backtrace(s, &fd_rec2->tracking_info, RELEASE_BACKTRACE_INFO); /* Next fd record */ 
fd_rec2 = (struct ion_fd_usage_record *)ion_get_data_from_record((void *)fd_rec2, RECORD_NEXT); } #endif /* Next record */ process_rec = process_rec->next; } } break; case DBIS_PIDS: printk(KERN_INFO "DBIS_PIDS\n"); { struct dbis_process_entry proclist = {.pid = -1, .clients = NULL, .next = NULL}; struct dbis_process_entry *pe = NULL; struct dbis_client_entry *ce = NULL; struct ion_process_record *current_process_rec = NULL; struct ion_client_usage_record *current_client_rec = NULL; struct ion_fd_usage_record *current_fd_usage_rec = NULL; struct ion_address_usage_record *current_mmap_usage_rec = NULL; process_rec = ion_get_inuse_process_usage_record2(); /* Firstly, we should go through all clients. */ for (cn = rb_first(&dev->clients); cn; cn = rb_next(cn)) { client = rb_entry(cn, struct ion_client, node); dbis_insert_proc_clients(&proclist, client, client->pid); } /* Now we can go through all processes using ION. */ pe = proclist.next; while (pe != NULL) { seq_printf(s, "%s[%d]\n","Process", pe->pid); current_process_rec = process_rec; while (current_process_rec != NULL) { if(current_process_rec->pid == pe->pid) { printk("found process pid %d in record\n",current_process_rec->pid); break; } current_process_rec = current_process_rec->next; } if(current_process_rec == NULL) { seq_printf(s, "ERROR!!!! can't find process pid %d in record \n",pe->pid); printk("ERROR!!!! 
can't find process pid %d in record\n",pe->pid); break; } /* Go through all clients for this pe */ ce = pe->clients; while (ce != NULL) { client = ce->client; current_client_rec = (struct ion_client_usage_record *)client_rec; while(current_client_rec != NULL) { if((current_client_rec->tracking_info.recordID.client_address == (unsigned int)client)&&(current_client_rec->tracking_info.recordID.pid == pe->pid)) { printk("found client client address 0x%x",current_client_rec->tracking_info.recordID.client_address); break; } current_client_rec = (struct ion_client_usage_record *)current_client_rec->next; } /* Show all client information */ if(current_client_rec != NULL) { seq_printf(s, "\n%8s[%2d] %12p fd[%d]\n", "client", client_cnt++, client,current_client_rec->fd); } else { seq_printf(s, "\n%8s[%2d] %12p\n", "client", client_cnt++, client); } mutex_lock(&client->lock); /* All client-handles */ for (hn = rb_first(&client->handles); hn; hn = rb_next(hn)) { handle = rb_entry(hn, struct ion_handle, node); seq_printf(s, "%10s[%2d](0x%x) ref_count %d kmap_cnt(%d)\n", "handle", buffer_cnt,(unsigned int)handle,atomic_read(&handle->ref.refcount),handle->kmap_cnt); /* All client-handle-buffers */ buffer = handle->buffer; current_fd_usage_rec = current_process_rec->fd_using_list; while(current_fd_usage_rec != NULL) { if((current_fd_usage_rec->buffer == buffer) && (current_fd_usage_rec->handle == handle)) { break; } current_fd_usage_rec = (struct ion_fd_usage_record *)current_fd_usage_rec->next; } mutex_lock(&buffer->lock); if(current_fd_usage_rec != NULL) { seq_printf(s, "%14s[%2d] fd(%d) heap(%s) ref_count(%d)flags(%d) buffer(0x%x) addr(0x%x) size(%d) \n", "--buffer", buffer_cnt++,current_fd_usage_rec->fd, buffer->heap->name,(int)atomic_read(&buffer->ref.refcount),(int)buffer->flags, (unsigned int)buffer,(unsigned int)buffer->vaddr,(int)buffer->size ); } else { seq_printf(s, "%14s[%2d] heap(%s) flags(%d) buffer (0x%x) addr(0x%x) size(%d) kmap_cnt(%d) kvaddr(0x%x)\n", "--buffer", 
buffer_cnt++,buffer->heap->name, (int)buffer->flags, (unsigned int)buffer,(unsigned int)buffer->vaddr ,(int)buffer->size,(int)buffer->kmap_cnt, (unsigned int)buffer->vaddr); } mutex_unlock(&buffer->lock); current_mmap_usage_rec = current_process_rec->address_using_list; while(current_mmap_usage_rec != NULL) { if(current_mmap_usage_rec->buffer == buffer) { seq_printf(s,"%16s mapping address[0x%x - 0x%x] size(%d)\n","----buffer",current_mmap_usage_rec->mapping_address,current_mmap_usage_rec->mapping_address+current_mmap_usage_rec->size,current_mmap_usage_rec->size); } current_mmap_usage_rec = current_mmap_usage_rec->next; } } mutex_unlock(&client->lock); buffer_cnt = 0; /* Next ce */ ce = ce->next; } /* Next pe */ pe = pe->next; } /* Finally, delete all entries in proclist */ destroy_proclist(&proclist); } break; #if 0 case DBIS_MODS: printk(KERN_INFO "DBIS_MODS\n"); break; #endif default: break; } return 0; } static int ion_debug_dbis_open(struct inode *inode, struct file *file) { return single_open(file, ion_debug_dbis_show, inode->i_private); } static const struct file_operations debug_dbis_fops = { .open = ion_debug_dbis_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void ion_debug_create_db(struct dentry *root) { int index; /* Create checking_leakage folder */ debug_db_root.checking_leakage = debugfs_create_dir("checking_leakage", root); INIT_LIST_HEAD(&debug_db_root.dbcl.child); /* Create ion_statistics folder & its children */ debug_db_root.ion_statistics = debugfs_create_dir("ion_statistics", root); for (index = 0; index <= _TOTAL_DBIS; ++index) { if (dbis_child_attr[index].attr == DBIS_FILE) { debug_db_root.dbis.child[index] = debugfs_create_file(dbis_child_attr[index].name, 0444, debug_db_root.ion_statistics,(void *)index, &debug_dbis_fops); } else {/* This is only for history now. 
*/ debug_db_root.dbis.child[index] = debugfs_create_dir(dbis_child_attr[index].name, debug_db_root.ion_statistics); #if 0 for (his_index = 0; his_index < _TOTAL_DBIS; ++his_index) { debug_db_root.dbis.history_record[his_index] = debugfs_create_file(dbis_child_attr[index+his_index+1].name, 0444, debug_db_root.dbis.child[index], his_index+index+1, &debug_dbis_fops); } #endif /* client - Use (DBIS_CLIENTS + DBIS_DIR) to identify history/clients */ debug_db_root.dbis.history_record[0] = debugfs_create_file(dbis_child_attr[DBIS_CLIENTS].name, 0444, debug_db_root.dbis.child[index], (void *)(DBIS_CLIENTS + DBIS_DIR), &debug_dbis_fops); /* buffers - Use (DBIS_BUFFERS + DBIS_DIR) to identify history/buffers */ debug_db_root.dbis.history_record[1] = debugfs_create_file(dbis_child_attr[DBIS_BUFFERS].name, 0444, debug_db_root.dbis.child[index], (void *)(DBIS_BUFFERS + DBIS_DIR), &debug_dbis_fops); /* mmaps - Use (DBIS_MMAPS + DBIS_DIR) to identify history/mmaps */ debug_db_root.dbis.history_record[2] = debugfs_create_file(dbis_child_attr[DBIS_MMAPS].name, 0444, debug_db_root.dbis.child[index], (void *)(DBIS_MMAPS + DBIS_DIR), &debug_dbis_fops); /* fds - Use (DBIS_fdS + DBIS_DIR) to identify history/fds */ debug_db_root.dbis.history_record[3] = debugfs_create_file(dbis_child_attr[DBIS_FDS].name, 0444, debug_db_root.dbis.child[index], (void *)(DBIS_FDS + DBIS_DIR), &debug_dbis_fops); /* pids - Use (DBIS_PIDS + DBIS_DIR) to identify history/pids */ debug_db_root.dbis.history_record[4] = debugfs_create_file(dbis_child_attr[DBIS_PIDS].name, 0444, debug_db_root.dbis.child[index], (void *)(DBIS_PIDS + DBIS_DIR), &debug_dbis_fops); } } } static void ion_debug_db_create_clentry(pid_t pid) { struct list_head *pos, *n; struct dbcl_child *found; char process_id[6]; int index; /* Check whether pid is in the cl list*/ list_for_each_safe(pos, n, &debug_db_root.dbcl.child) { found = list_entry(pos, struct dbcl_child, entry); if ((pid_t)found->raw_key == pid) { /* We have found one. 
*/ atomic_inc(&found->refcount); return; } } /* No existing entry */ found = kmalloc(sizeof(struct dbcl_child), GFP_KERNEL); found->raw_key = (void *)pid; snprintf(process_id, 6, "%d", pid); found->root = debugfs_create_dir(process_id, debug_db_root.checking_leakage); for (index = 0; index < _TOTAL_DBCL; ++index) { found->type[index] = debugfs_create_file(dbcl_child_name[index], 0444, found->root, (void *)((index << 16) | pid), &debug_dbcl_fops); } atomic_set(&found->refcount, 1); list_add_tail(&found->entry, &debug_db_root.dbcl.child); } static void ion_debug_db_destroy_clentry(pid_t pid) { struct list_head *pos, *n; struct dbcl_child *found; /* Check whether pid is in the cl list*/ list_for_each_safe(pos, n, &debug_db_root.dbcl.child) { found = list_entry(pos, struct dbcl_child, entry); if ((pid_t)found->raw_key == pid) { /* We have found one. */ if (atomic_dec_and_test(&found->refcount)) { /* Delete list entry, remove corresponding debugfs dir/files, free memory. */ list_del(&found->entry); debugfs_remove_recursive(found->root); kfree(found); } return; } } printk(KERN_DEBUG "Oh!!!!!\n"); } #endif
zhaochengw/android_kernel_blackview_p1-pro
drivers/staging/android/ion/ion.c
C
gpl-2.0
102,374
/********************************************************************** * * * Voreen - The Volume Rendering Engine * * * * Created between 2005 and 2012 by The Voreen Team * * as listed in CREDITS.TXT <http://www.voreen.org> * * * * This file is part of the Voreen software package. Voreen is free * * software: you can redistribute it and/or modify it under the terms * * of the GNU General Public License version 2 as published by the * * Free Software Foundation. * * * * Voreen is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * in the file "LICENSE.txt" along with this program. * * If not, see <http://www.gnu.org/licenses/>. * * * * The authors reserve all rights not expressly granted herein. For * * non-commercial academic use see the license exception specified in * * the file "LICENSE-academic.txt". To get information about * * commercial licensing please contact the authors. 
* * * **********************************************************************/ #include "voreen/core/io/volumewriter.h" #include "voreen/core/io/progressbar.h" namespace voreen { const std::string VolumeWriter::loggerCat_("voreen.io.VolumeWriter"); VolumeWriter::VolumeWriter(ProgressBar* progress) : progress_(progress) {} const std::vector<std::string>& VolumeWriter::getSupportedExtensions() const { return extensions_; } const std::vector<std::string>& VolumeWriter::getSupportedFilenames() const { return filenames_; } const std::vector<std::string>& VolumeWriter::getSupportedProtocols() const { return protocols_; } std::string VolumeWriter::getFileNameWithoutExtension(const std::string& filename) { return filename.substr(0, filename.rfind(".")); } std::string VolumeWriter::getExtension(const std::string& filename) { return filename.substr(filename.rfind(".") + 1, filename.length()); } void VolumeWriter::setProgressBar(ProgressBar* progressBar) { progress_ = progressBar; } ProgressBar* VolumeWriter::getProgressBar() const { return progress_; } } // namespace voreen
Elima85/bccfccraycaster
src/core/io/volumewriter.cpp
C++
gpl-2.0
2,993
/* This file was generated from java/lang/Long.java and is licensed under * the same terms. The copyright and license information for * java/lang/Long.java follows. * * Copyright (c) 1994, 2009, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. 
*/ /* DO NOT EDIT THIS FILE - it is machine generated */ #include <jni.h> /* Header for class java_lang_Long */ #ifndef _Included_java_lang_Long #define _Included_java_lang_Long #ifdef __cplusplus extern "C" { #endif #undef java_lang_Long_serialVersionUID #define java_lang_Long_serialVersionUID -8742448824652078965LL #undef java_lang_Long_MIN_VALUE #define java_lang_Long_MIN_VALUE -9223372036854775808LL #undef java_lang_Long_MAX_VALUE #define java_lang_Long_MAX_VALUE 9223372036854775807LL #undef java_lang_Long_SIZE #define java_lang_Long_SIZE 64L #undef java_lang_Long_serialVersionUID #define java_lang_Long_serialVersionUID 4290774380558885855LL #ifdef __cplusplus } #endif #endif
AdmireTheDistance/android_libcore
ojluni/src/main/native/java_lang_Long.h
C
gpl-2.0
2,067
/* -*- c++ -*- ---------------------------------------------------------- LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator https://www.lammps.org/, Sandia National Laboratories Steve Plimpton, sjplimp@sandia.gov Copyright (2003) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain rights in this software. This software is distributed under the GNU General Public License. See the README file in the top-level LAMMPS directory. ------------------------------------------------------------------------- */ #ifdef COMPUTE_CLASS // clang-format off ComputeStyle(msd/chunk,ComputeMSDChunk); // clang-format on #else #ifndef LMP_COMPUTE_MSD_CHUNK_H #define LMP_COMPUTE_MSD_CHUNK_H #include "compute.h" namespace LAMMPS_NS { class ComputeMSDChunk : public Compute { public: ComputeMSDChunk(class LAMMPS *, int, char **); ~ComputeMSDChunk(); void init(); void setup(); void compute_array(); void lock_enable(); void lock_disable(); int lock_length(); void lock(class Fix *, bigint, bigint); void unlock(class Fix *); double memory_usage(); private: int nchunk; char *idchunk; class ComputeChunkAtom *cchunk; char *id_fix; class FixStore *fix; int firstflag; double *massproc, *masstotal; double **com, **comall; double **msd; void allocate(); }; } // namespace LAMMPS_NS #endif #endif /* ERROR/WARNING messages: E: Illegal ... command Self-explanatory. Check the input script syntax and compare to the documentation for the command. You can use -echo screen as a command-line option when running LAMMPS to see the offending line. E: Chunk/atom compute does not exist for compute msd/chunk Self-explanatory. E: Compute msd/chunk does not use chunk/atom compute The style of the specified compute is not chunk/atom. E: Could not find compute msd/chunk fix ID The compute creates an internal fix, which has been deleted. 
E: Compute msd/chunk nchunk is not static This is required because the MSD cannot be computed consistently if the number of chunks is changing. Compute chunk/atom allows setting nchunk to be static. */
jeremiahyan/lammps
src/compute_msd_chunk.h
C
gpl-2.0
2,208
#include <iostream> #include <storage/SystemInfo/SystemInfoImpl.h> using namespace std; using namespace storage; void test_cmd_dmsetup_table(SystemInfo::Impl& system_info) { try { const CmdDmsetupTable& cmd_dmsetup_table = system_info.getCmdDmsetupTable(); cout << "CmdDmsetupTable success" << endl; cout << cmd_dmsetup_table << endl; } catch (const exception& e) { cerr << "CmdDmsetupTable failed" << endl; } } int main() { set_logger(get_logfile_logger()); SystemInfo::Impl system_info; test_cmd_dmsetup_table(system_info); }
aschnell/libstorage-ng
examples/SystemInfo/test-cmd-dmsetup-table.cc
C++
gpl-2.0
577
<?php /** * @version $Id: view.html.php 19014 2012-11-28 04:48:56Z thailv $ * @package JSNUniform * @subpackage About * @author JoomlaShine Team <support@joomlashine.com> * @copyright Copyright (C) 2016 JoomlaShine.com. All Rights Reserved. * @license GNU/GPL v2 or later http://www.gnu.org/licenses/gpl-2.0.html * * Websites: http://www.joomlashine.com * Technical Support: Feedback - http://www.joomlashine.com/contact-us/get-support.html */ // No direct access to this file defined('_JEXEC') or die('Restricted access'); // Import Joomla view library jimport('joomla.application.component.view'); /** * View class for a list of Forms. * * @package Joomla.Administrator * @subpackage com_uniform * @since 1.5 */ class JSNUniformViewAbout extends JSNBaseView { /** * Execute and display a template script. * * @param string $tpl The name of the template file to parse; automatically searches through the template paths. * * @return mixed A string if successful, otherwise a JError object. */ function display($tpl = null) { // Get config parameters $config = JSNConfigHelper::get(); // Initialize toolbar JSNUniformHelper::initToolbar('JSN_UNIFORM_ABOUT', 'uniform-about', false); // Get messages $msgs = ''; if ( ! $config->get('disable_all_messages')) { $msgs = JSNUtilsMessage::getList('ABOUT'); $msgs = count($msgs) ? JSNUtilsMessage::showMessages($msgs) : ''; } // Load assets JSNUniformHelper::addAssets(); // Assign variables for rendering $this->assignRef('msgs', $msgs); // Load the submenu. $input = JFactory::getApplication()->input; JSNUniformHelper::addSubmenu($input->get('view', 'about')); // Display the template parent::display($tpl); } }
thusharaprimus/joomlaecommerce
administrator/components/com_uniform/views/about/view.html.php
PHP
gpl-2.0
1,780
<?php /** * @package AcyMailing for Joomla! * @version 4.9.2 * @author acyba.com * @copyright (C) 2009-2015 ACYBA S.A.R.L. All rights reserved. * @license GNU/GPLv3 http://www.gnu.org/licenses/gpl-3.0.html */ defined('_JEXEC') or die('Restricted access'); ?><?php JPluginHelper::importPlugin('acymailing'); $dispatcher = JDispatcher::getInstance(); $typesFilters = array(); $outputFilters = implode('',$dispatcher->trigger('onAcyDisplayFilters',array(&$typesFilters,'mail'))); if(empty($typesFilters)) return; $filterClass = acymailing_get('class.filter'); $filterClass->addJSFilterFunctions(); $doc = JFactory::getDocument(); $js = ''; $datatype = "filter"; $jsFunction = "addAcyFilter"; if(!empty($this->mail->$datatype)){ foreach($this->mail->{$datatype}['type'] as $num => $oneType){ if(empty($oneType)) continue; $js .= "while(!document.getElementById('".$datatype."type$num')){".$jsFunction."();} document.getElementById('".$datatype."type$num').value= '$oneType'; update".ucfirst($datatype)."($num);"; if(empty($this->mail->{$datatype}[$num][$oneType])) continue; foreach($this->mail->{$datatype}[$num][$oneType] as $key => $value){ $js .= "document.adminForm.elements['".$datatype."[$num][$oneType][$key]'].value = '".addslashes(str_replace(array("\n","\r"),' ',$value))."';"; $js .= "if(document.adminForm.elements['".$datatype."[$num][$oneType][$key]'].type && document.adminForm.elements['".$datatype."[$num][$oneType][$key]'].type == 'checkbox'){ document.adminForm.elements['".$datatype."[$num][$oneType][$key]'].checked = 'checked'; }"; } if($datatype == 'filter') $js.= " countresults($num);"; } } $doc->addScriptDeclaration( "window.addEvent('domready', function(){ $js });" ); $typevaluesFilters = array(); $typevaluesFilters[] = JHTML::_('select.option', '',JText::_('FILTER_SELECT')); foreach($typesFilters as $oneType => $oneName){ $typevaluesFilters[] = JHTML::_('select.option', $oneType,$oneName); } ?> <br /> <div class="acy_filter_mail"> <input type="hidden" 
name="data[mail][filter]" value="" /> <div id="acybase_filters" style="display:none"> <div id="filters_original"> <?php echo JHTML::_('select.genericlist', $typevaluesFilters, "filter[type][__num__]", 'class="inputbox" size="1" onchange="updateFilter(__num__);countresults(__num__);"', 'value', 'text','filtertype__num__');?> <span id="countresult___num__"></span> <div class="acyfilterarea" id="filterarea___num__"></div> </div> <?php echo $outputFilters; ?> </div> <?php echo JText::_('RECEIVER_LISTS').' '.JText::_('RECEIVER_FILTER'); ?> <fieldset class="adminform" > <legend><?php echo JText::_( 'ACY_FILTERS' ); ?></legend> <div id="allfilters"></div> <button class="btn btn-primary" onclick="addAcyFilter();return false;"><?php echo JText::_('ADD_FILTER'); ?></button> </fieldset> </div>
DanyCan/wisten.github.io
administrator/components/com_acymailing/views/newsletter/tmpl/filters.php
PHP
gpl-2.0
2,831
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*- */ /* * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301 USA. * * Copyright 2008 Novell, Inc. * Copyright 2009 - 2012 Red Hat, Inc. */ #include "config.h" #include <string.h> #include <NetworkManager.h> #include <nm-connection.h> #include "nm-dbus-glib-types.h" #include "nm-remote-settings.h" #include "nm-remote-connection-private.h" #include "nm-object-private.h" #include "nm-dbus-helpers-private.h" #include "nm-glib-compat.h" #include "nm-object-private.h" /** * SECTION:nm-remote-settings * @Short_description: A helper for NetworkManager's settings API * @Title: NMRemoteSettings * @See_also:#NMRemoteConnection, #NMClient * * The #NMRemoteSettings object represents NetworkManager's "settings" service, * which stores network configuration and allows authenticated clients to * add, delete, and modify that configuration. The data required to connect * to a specific network is called a "connection" and encapsulated by the * #NMConnection object. Once a connection is known to NetworkManager, having * either been added by a user or read from on-disk storage, the * #NMRemoteSettings object creates a #NMRemoteConnection object which * represents this stored connection. 
Use the #NMRemoteConnection object to * perform any operations like modification or deletion. * * To add a new network connection to the NetworkManager settings service, first * build up a template #NMConnection object. Since this connection is not yet * added to NetworkManager, it is known only to your program and is not yet * an #NMRemoteConnection. Then ask #NMRemoteSettings to add your connection. * When the connection is added successfully, the supplied callback is called * and returns to your program the new #NMRemoteConnection which represents * the stored object known to NetworkManager. * * |[<!-- language="C" --> * static void * added_cb (NMRemoteSettings *settings, * NMRemoteConnection *remote, * GError *error, * gpointer user_data) * { * if (error) * g_print ("Error adding connection: %s", error->message); * else { * g_print ("Added: %s\n", nm_connection_get_path (NM_CONNECTION (remote))); * /&ast; Use 'remote' with nm_remote_connection_commit_changes() to save * * changes and nm_remote_connection_delete() to delete the connection &ast;/ * } * } * * static gboolean * add_wired_connection (const char *human_name) * { * NMConnection *connection; * NMSettingConnection *s_con; * NMSettingWired *s_wired; * char *uuid; * gboolean success; * * connection = nm_connection_new (); * * /&ast; Build up the 'connection' setting &ast;/ * s_con = (NMSettingConnection *) nm_setting_connection_new (); * uuid = nm_utils_uuid_generate (); * g_object_set (G_OBJECT (s_con), * NM_SETTING_CONNECTION_UUID, uuid, * NM_SETTING_CONNECTION_ID, human_name, * NM_SETTING_CONNECTION_TYPE, NM_SETTING_WIRED_SETTING_NAME, * NULL); * g_free (uuid); * nm_connection_add_setting (connection, NM_SETTING (s_con)); * * /&ast; Add the required 'wired' setting as this is a wired connection &ast;/ * nm_connection_add_setting (connection, nm_setting_wired_new ()); * * /&ast; Add an 'ipv4' setting using AUTO configuration (eg DHCP) &ast;/ * s_ip4 = (NMSettingIP4Config *) nm_setting_ip4_config_new (); 
* g_object_set (G_OBJECT (s_ip4), * NM_SETTING_IP4_CONFIG_METHOD, NM_SETTING_IP4_CONFIG_METHOD_AUTO, * NULL); * nm_connection_add_setting (connection, NM_SETTING (s_ip4)); * * /&ast; Ask NetworkManager to store the connection &ast;/ * success = nm_remote_settings_add_connection (settings, connection, added_cb, loop); * * /&ast; Release the template connection; the actual stored connection will * * be returned in added_cb() &ast;/ * g_object_unref (connection); * * /&ast; Let glib event loop run and added_cb() will be called when NetworkManager * * is done adding the new connection. &ast;/ * * return success; * } * ]| */ static void nm_remote_settings_initable_iface_init (GInitableIface *iface); static void nm_remote_settings_async_initable_iface_init (GAsyncInitableIface *iface); G_DEFINE_TYPE_WITH_CODE (NMRemoteSettings, nm_remote_settings, G_TYPE_OBJECT, G_IMPLEMENT_INTERFACE (G_TYPE_INITABLE, nm_remote_settings_initable_iface_init); G_IMPLEMENT_INTERFACE (G_TYPE_ASYNC_INITABLE, nm_remote_settings_async_initable_iface_init); ) #define NM_REMOTE_SETTINGS_GET_PRIVATE(o) (G_TYPE_INSTANCE_GET_PRIVATE ((o), NM_TYPE_REMOTE_SETTINGS, NMRemoteSettingsPrivate)) typedef struct { DBusGConnection *bus; gboolean private_bus; gboolean inited; DBusGProxy *proxy; GHashTable *connections; GHashTable *pending; /* Connections we don't have settings for yet */ gboolean service_running; guint32 init_left; /* AddConnectionInfo objects that are waiting for the connection to become initialized */ GSList *add_list; DBusGProxy *props_proxy; char *hostname; gboolean can_modify; DBusGProxy *dbus_proxy; DBusGProxyCall *listcon_call; } NMRemoteSettingsPrivate; enum { PROP_0, PROP_BUS, PROP_SERVICE_RUNNING, PROP_HOSTNAME, PROP_CAN_MODIFY, LAST_PROP }; /* Signals */ enum { NEW_CONNECTION, CONNECTIONS_READ, LAST_SIGNAL }; static guint signals[LAST_SIGNAL] = { 0 }; /**********************************************************************/ /** * nm_remote_settings_error_quark: * * Registers an error 
quark for #NMRemoteSettings if necessary. * * Returns: the error quark used for #NMRemoteSettings errors. **/ GQuark nm_remote_settings_error_quark (void) { static GQuark quark; if (G_UNLIKELY (!quark)) quark = g_quark_from_static_string ("nm-remote-settings-error-quark"); return quark; } /**********************************************************************/ static void _nm_remote_settings_ensure_inited (NMRemoteSettings *self) { NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (self); GError *error = NULL; if (!priv->inited) { if (!g_initable_init (G_INITABLE (self), NULL, &error)) { /* Don't warn when the call times out because the settings service can't * be activated or whatever. */ if (!g_error_matches (error, DBUS_GERROR, DBUS_GERROR_NO_REPLY)) { g_warning ("%s: (NMRemoteSettings) error initializing: %s\n", __func__, error->message); } g_error_free (error); } priv->inited = TRUE; } } /**********************************************************************/ typedef struct { NMRemoteSettings *self; NMRemoteSettingsAddConnectionFunc callback; gpointer callback_data; NMRemoteConnection *connection; } AddConnectionInfo; static AddConnectionInfo * add_connection_info_find (NMRemoteSettings *self, NMRemoteConnection *connection) { NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (self); GSList *iter; for (iter = priv->add_list; iter; iter = g_slist_next (iter)) { AddConnectionInfo *info = iter->data; if (info->connection == connection) return info; } return NULL; } static void add_connection_info_dispose (NMRemoteSettings *self, AddConnectionInfo *info) { NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (self); priv->add_list = g_slist_remove (priv->add_list, info); g_free (info); } static void add_connection_info_complete (NMRemoteSettings *self, AddConnectionInfo *info, GError *error) { g_return_if_fail (info != NULL); info->callback (info->self, error ? 
	/* (tail of add_connection_info_complete — the function's start is above
	 * this chunk; invokes the user's AddConnection callback and frees info) */
	                NULL : info->connection, error, info->callback_data);
	add_connection_info_dispose (self, info);
}

/**
 * nm_remote_settings_get_connection_by_id:
 * @settings: the %NMRemoteSettings
 * @id: the id of the remote connection
 *
 * Returns the first matching %NMRemoteConnection matching a given @id.
 *
 * Returns: (transfer none): the remote connection object on success, or %NULL if no
 *  matching object was found.
 *
 * Since: 0.9.10
 **/
NMRemoteConnection *
nm_remote_settings_get_connection_by_id (NMRemoteSettings *settings, const char *id)
{
	NMRemoteSettingsPrivate *priv;

	g_return_val_if_fail (NM_IS_REMOTE_SETTINGS (settings), NULL);
	g_return_val_if_fail (id != NULL, NULL);

	priv = NM_REMOTE_SETTINGS_GET_PRIVATE (settings);

	_nm_remote_settings_ensure_inited (settings);

	/* Only visible (fully-initialized) connections live in priv->connections;
	 * connections still pending initialization are not searched. */
	if (priv->service_running) {
		GHashTableIter iter;
		NMConnection *candidate;

		g_hash_table_iter_init (&iter, priv->connections);
		while (g_hash_table_iter_next (&iter, NULL, (gpointer *) &candidate)) {
			if (!strcmp (id, nm_connection_get_id (candidate)))
				return NM_REMOTE_CONNECTION (candidate);
		}
	}
	return NULL;
}

/**
 * nm_remote_settings_get_connection_by_path:
 * @settings: the %NMRemoteSettings
 * @path: the D-Bus object path of the remote connection
 *
 * Returns the %NMRemoteConnection representing the connection at @path.
 *
 * Returns: (transfer none): the remote connection object on success, or %NULL if the object was
 *  not known
 **/
NMRemoteConnection *
nm_remote_settings_get_connection_by_path (NMRemoteSettings *settings, const char *path)
{
	NMRemoteSettingsPrivate *priv;

	g_return_val_if_fail (NM_IS_REMOTE_SETTINGS (settings), NULL);
	g_return_val_if_fail (path != NULL, NULL);

	priv = NM_REMOTE_SETTINGS_GET_PRIVATE (settings);

	_nm_remote_settings_ensure_inited (settings);

	/* priv->connections is keyed by D-Bus object path, so this is a direct lookup */
	return priv->service_running ? g_hash_table_lookup (priv->connections, path) : NULL;
}

/**
 * nm_remote_settings_get_connection_by_uuid:
 * @settings: the %NMRemoteSettings
 * @uuid: the UUID of the remote connection
 *
 * Returns the %NMRemoteConnection identified by @uuid.
 *
 * Returns: (transfer none): the remote connection object on success, or %NULL if the object was
 *  not known
 **/
NMRemoteConnection *
nm_remote_settings_get_connection_by_uuid (NMRemoteSettings *settings, const char *uuid)
{
	NMRemoteSettingsPrivate *priv;
	GHashTableIter iter;
	NMRemoteConnection *candidate;

	g_return_val_if_fail (NM_IS_REMOTE_SETTINGS (settings), NULL);
	g_return_val_if_fail (uuid != NULL, NULL);

	priv = NM_REMOTE_SETTINGS_GET_PRIVATE (settings);

	_nm_remote_settings_ensure_inited (settings);

	if (priv->service_running) {
		g_hash_table_iter_init (&iter, priv->connections);
		/* NOTE(review): sibling nm_remote_settings_get_connection_by_id uses
		 * (gpointer *) for this cast; (gpointer) works but is inconsistent. */
		while (g_hash_table_iter_next (&iter, NULL, (gpointer) &candidate)) {
			if (g_strcmp0 (uuid, nm_connection_get_uuid (NM_CONNECTION (candidate))) == 0)
				return candidate;
		}
	}
	return NULL;
}

/* Handler for NM_REMOTE_CONNECTION_REMOVED on a tracked connection: fails any
 * pending AddConnection request for it and drops it from both hashes. */
static void
connection_removed_cb (NMRemoteConnection *remote, gpointer user_data)
{
	NMRemoteSettings *self = NM_REMOTE_SETTINGS (user_data);
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (self);
	AddConnectionInfo *addinfo;
	GError *add_error;
	const char *path;

	/* Might have been removed while it was waiting to be initialized */
	addinfo = add_connection_info_find (self, remote);
	if (addinfo) {
		add_error = g_error_new_literal (NM_REMOTE_SETTINGS_ERROR,
		                                 NM_REMOTE_SETTINGS_ERROR_CONNECTION_REMOVED,
		                                 "Connection removed before it was initialized");
		add_connection_info_complete (self, addinfo, add_error);
		g_error_free (add_error);
	}

	/* Removing from the hashes drops the hash's reference (forget_connection) */
	path = nm_connection_get_path (NM_CONNECTION (remote));
	g_hash_table_remove (priv->connections, path);
	g_hash_table_remove (priv->pending, path);
}

static void connection_visible_cb (NMRemoteConnection *remote,
                                   gboolean visible,
                                   gpointer user_data);

/* Takes a reference to the connection when adding to 'to' */
static void
move_connection (NMRemoteSettings *self,
                 NMRemoteConnection *remote,
                 GHashTable *from,
                 GHashTable *to)
{
	const char *path = nm_connection_get_path (NM_CONNECTION (remote));

	/* Ref first; removing from 'from' would otherwise release the last ref */
	g_hash_table_insert (to, g_strdup (path), g_object_ref (remote));
	if (from)
		g_hash_table_remove (from, path);

	/* Setup connection signals since removing from 'from' clears them, but
	 * also the first time the connection is added to a hash if 'from' is NULL.
	 */
	if (!g_signal_handler_find (remote, G_SIGNAL_MATCH_FUNC,
	                            0, 0, NULL, connection_removed_cb, NULL)) {
		g_signal_connect (remote,
		                  NM_REMOTE_CONNECTION_REMOVED,
		                  G_CALLBACK (connection_removed_cb),
		                  self);
	}

	if (!g_signal_handler_find (remote, G_SIGNAL_MATCH_FUNC,
	                            0, 0, NULL, connection_visible_cb, NULL)) {
		g_signal_connect (remote,
		                  "visible",
		                  G_CALLBACK (connection_visible_cb),
		                  self);
	}
}

/* Handler for a tracked connection's "visible" signal: shuffles the connection
 * between the visible hash (priv->connections) and the hidden/pending hash
 * (priv->pending), emitting new-connection / removed for clients as needed. */
static void
connection_visible_cb (NMRemoteConnection *remote,
                       gboolean visible,
                       gpointer user_data)
{
	NMRemoteSettings *self = NM_REMOTE_SETTINGS (user_data);
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (self);
	const char *path;

	path = nm_connection_get_path (NM_CONNECTION (remote));
	g_assert (path);

	/* When a connection becomes invisible, we put it back in the pending
	 * hash until it becomes visible again.  When it does, we move it back to
	 * the normal connections hash.
	 */
	if (visible) {
		/* Connection visible to this user again */
		if (g_hash_table_lookup (priv->pending, path)) {
			/* Move connection from pending to visible hash; emit for clients */
			move_connection (self, remote, priv->pending, priv->connections);
			g_signal_emit (self, signals[NEW_CONNECTION], 0, remote);
		}
	} else {
		/* Connection now invisible to this user */
		if (g_hash_table_lookup (priv->connections, path)) {
			/* Move connection to pending hash and wait for it to become visible again */
			move_connection (self, remote, priv->connections, priv->pending);

			/* Signal to clients that the connection is gone; but we have to
			 * block our connection removed handler so we don't destroy
			 * the connection when the signal is emitted.
			 */
			g_signal_handlers_block_by_func (remote, connection_removed_cb, self);
			g_signal_emit_by_name (remote, NM_REMOTE_CONNECTION_REMOVED);
			g_signal_handlers_unblock_by_func (remote, connection_removed_cb, self);
		}
	}
}

/* Completion of a connection's async initialization (started in
 * new_connection_cb).  On success the connection is promoted from
 * priv->pending to priv->connections and announced to clients; on failure it
 * is kept pending (PermissionDenied, i.e. invisible to this user) or dropped.
 * Also decrements init_left and emits connections-read when the initial
 * ListConnections batch has fully settled. */
static void
connection_inited (GObject *source, GAsyncResult *result, gpointer user_data)
{
	NMRemoteConnection *remote = NM_REMOTE_CONNECTION (source);
	NMRemoteSettings *self = NM_REMOTE_SETTINGS (user_data);
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (self);
	AddConnectionInfo *addinfo;
	const char *path;
	GError *error = NULL, *local;

	path = nm_connection_get_path (NM_CONNECTION (remote));
	addinfo = add_connection_info_find (self, remote);

	if (g_async_initable_init_finish (G_ASYNC_INITABLE (remote), result, &error)) {
		/* Connection is initialized and visible; expose it to clients */
		move_connection (self, remote, priv->pending, priv->connections);

		/* If there's a pending AddConnection request, complete that here before
		 * signaling new-connection.
		 */
		if (addinfo)
			add_connection_info_complete (self, addinfo, NULL);

		/* Finally, let users know of the new connection now that it has all
		 * its settings and is valid.
		 */
		g_signal_emit (self, signals[NEW_CONNECTION], 0, remote);
	} else {
		if (addinfo) {
			local = g_error_new (NM_REMOTE_SETTINGS_ERROR,
			                     NM_REMOTE_SETTINGS_ERROR_CONNECTION_UNAVAILABLE,
			                     "Connection not visible or not available: %s",
			                     error ? error->message : "(unknown)");
			add_connection_info_complete (self, addinfo, local);
			g_error_free (local);
		}

		/* PermissionDenied means the connection isn't visible to this user, so
		 * keep it in priv->pending to be notified later of visibility changes.
		 * Otherwise forget it.
		 */
		if (!dbus_g_error_has_name (error, "org.freedesktop.NetworkManager.Settings.PermissionDenied"))
			g_hash_table_remove (priv->pending, path);

		g_error_free (error);
	}

	/* Let listeners know that all connections have been found */
	priv->init_left--;
	if (priv->init_left == 0)
		g_signal_emit (self, signals[CONNECTIONS_READ], 0);

	g_object_unref (self);
}

/* Handler for the settings service's NewConnection D-Bus signal (also called
 * directly for each path returned by ListConnections and AddConnection).
 * Creates an NMRemoteConnection proxy for @path, parks it in priv->pending,
 * and kicks off its async initialization; connection_inited finishes the job. */
static NMRemoteConnection *
new_connection_cb (DBusGProxy *proxy, const char *path, gpointer user_data)
{
	NMRemoteSettings *self = NM_REMOTE_SETTINGS (user_data);
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (self);
	NMRemoteConnection *connection = NULL;

	/* Make double-sure we don't already have it */
	connection = g_hash_table_lookup (priv->pending, path);
	if (connection)
		return connection;
	connection = g_hash_table_lookup (priv->connections, path);
	if (connection)
		return connection;

	/* Create a new connection object for it */
	connection = nm_remote_connection_new (priv->bus, path);
	if (connection) {
		/* The ref passed to g_object_ref (self) is released in connection_inited */
		g_async_initable_init_async (G_ASYNC_INITABLE (connection),
		                             G_PRIORITY_DEFAULT, NULL,
		                             connection_inited,
		                             g_object_ref (self));

		/* Add the connection to the pending table to wait for it to retrieve
		 * it's settings asynchronously over D-Bus.  The connection isn't
		 * really valid until it has all its settings, so hide it until it does.
		 */
		move_connection (self, connection, NULL, priv->pending);
		g_object_unref (connection); /* move_connection() takes a ref */
	}
	return connection;
}

/* Completion of the async ListConnections call: spawns a pending
 * NMRemoteConnection for every returned path (connections-read is emitted
 * later, by connection_inited, once all of them finish initializing), or
 * emits connections-read immediately when there are none / the call failed. */
static void
fetch_connections_done (DBusGProxy *proxy,
                        DBusGProxyCall *call,
                        gpointer user_data)
{
	NMRemoteSettings *self = NM_REMOTE_SETTINGS (user_data);
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (self);
	GPtrArray *connections;
	GError *error = NULL;
	int i;

	g_warn_if_fail (priv->listcon_call == call);
	priv->listcon_call = NULL;

	if (!dbus_g_proxy_end_call (proxy, call, &error,
	                            DBUS_TYPE_G_ARRAY_OF_OBJECT_PATH, &connections,
	                            G_TYPE_INVALID)) {
		/* Don't warn if the service simply isn't there (service-unknown /
		 * no-owner), or if we already know it's not running. */
		if (   !g_error_matches (error, DBUS_GERROR, DBUS_GERROR_SERVICE_UNKNOWN)
		    && !g_error_matches (error, DBUS_GERROR, DBUS_GERROR_NAME_HAS_NO_OWNER)
		    && priv->service_running) {
			g_warning ("%s: error fetching connections: (%d) %s.",
			           __func__,
			           error->code,
			           error->message ? error->message : "(unknown)");
		}
		g_clear_error (&error);

		/* We tried to read connections and failed */
		g_signal_emit (self, signals[CONNECTIONS_READ], 0);
		return;
	}

	/* Let listeners know we are done getting connections */
	if (connections->len == 0)
		g_signal_emit (self, signals[CONNECTIONS_READ], 0);
	else {
		priv->init_left = connections->len;
		for (i = 0; i < connections->len; i++) {
			char *path = g_ptr_array_index (connections, i);

			new_connection_cb (proxy, path, user_data);
			g_free (path);
		}
	}

	g_ptr_array_free (connections, TRUE);
}

/**
 * nm_remote_settings_list_connections:
 * @settings: the %NMRemoteSettings
 *
 * Returns: (transfer container) (element-type NMRemoteConnection): a
 * list containing all connections provided by the remote settings service.
 * Each element of the returned list is a %NMRemoteConnection instance, which is
 * owned by the %NMRemoteSettings object and should not be freed by the caller.
 * The returned list is, however, owned by the caller and should be freed
 * using g_slist_free() when no longer required.
 **/
GSList *
nm_remote_settings_list_connections (NMRemoteSettings *settings)
{
	NMRemoteSettingsPrivate *priv;
	GSList *list = NULL;
	GHashTableIter iter;
	gpointer value;

	g_return_val_if_fail (NM_IS_REMOTE_SETTINGS (settings), NULL);

	priv = NM_REMOTE_SETTINGS_GET_PRIVATE (settings);

	_nm_remote_settings_ensure_inited (settings);

	if (priv->service_running) {
		g_hash_table_iter_init (&iter, priv->connections);
		while (g_hash_table_iter_next (&iter, NULL, &value))
			list = g_slist_prepend (list, NM_REMOTE_CONNECTION (value));
	}

	return list;
}

/* Completion of an AddConnection / AddConnectionUnsaved D-Bus call.  On
 * success the returned path is handed to new_connection_cb; the user callback
 * fires later, from connection_inited, once the connection is initialized.
 * On failure the user callback fires immediately with the error. */
static void
add_connection_done (DBusGProxy *proxy,
                     DBusGProxyCall *call,
                     gpointer user_data)
{
	AddConnectionInfo *info = user_data;
	GError *error = NULL;
	char *path = NULL;

	if (dbus_g_proxy_end_call (proxy, call, &error, DBUS_TYPE_G_OBJECT_PATH, &path, G_TYPE_INVALID)) {
		info->connection = new_connection_cb (proxy, path, info->self);
		g_assert (info->connection);
		/* Wait until this connection is fully initialized before calling the callback */
		g_free (path);
	} else
		add_connection_info_complete (info->self, info, error);

	g_clear_error (&error);
}

/**
 * nm_remote_settings_add_connection:
 * @settings: the %NMRemoteSettings
 * @connection: the connection to add. Note that this object's settings will be
 *   added, not the object itself
 * @callback: (scope async): callback to be called when the add operation completes
 * @user_data: (closure): caller-specific data passed to @callback
 *
 * Requests that the remote settings service add the given settings to a new
 * connection.  The connection is immediately written to disk.  @connection is
 * untouched by this function and only serves as a template of the settings to
 * add.  The #NMRemoteConnection object that represents what NetworkManager
 * actually added is returned to @callback when the addition operation is complete.
 *
 * Note that the #NMRemoteConnection returned in @callback may not contain
 * identical settings to @connection as NetworkManager may perform automatic
 * completion and/or normalization of connection properties.
 *
 * Returns: %TRUE if the request was successful, %FALSE if it failed
 **/
gboolean
nm_remote_settings_add_connection (NMRemoteSettings *settings,
                                   NMConnection *connection,
                                   NMRemoteSettingsAddConnectionFunc callback,
                                   gpointer user_data)
{
	NMRemoteSettingsPrivate *priv;
	AddConnectionInfo *info;
	GHashTable *new_settings;

	g_return_val_if_fail (NM_IS_REMOTE_SETTINGS (settings), FALSE);
	g_return_val_if_fail (NM_IS_CONNECTION (connection), FALSE);
	g_return_val_if_fail (callback != NULL, FALSE);

	priv = NM_REMOTE_SETTINGS_GET_PRIVATE (settings);

	_nm_remote_settings_ensure_inited (settings);

	if (!priv->service_running)
		return FALSE;

	/* info is freed by add_connection_info_dispose() once the request completes */
	info = g_malloc0 (sizeof (AddConnectionInfo));
	info->self = settings;
	info->callback = callback;
	info->callback_data = user_data;

	new_settings = nm_connection_to_hash (connection, NM_SETTING_HASH_FLAG_ALL);
	dbus_g_proxy_begin_call (priv->proxy, "AddConnection",
	                         add_connection_done,
	                         info,
	                         NULL,
	                         DBUS_TYPE_G_MAP_OF_MAP_OF_VARIANT, new_settings,
	                         G_TYPE_INVALID);
	g_hash_table_destroy (new_settings);

	priv->add_list = g_slist_append (priv->add_list, info);

	return TRUE;
}

/**
 * nm_remote_settings_add_connection_unsaved:
 * @settings: the %NMRemoteSettings
 * @connection: the connection to add. Note that this object's settings will be
 *   added, not the object itself
 * @callback: (scope async): callback to be called when the add operation completes
 * @user_data: (closure): caller-specific data passed to @callback
 *
 * Requests that the remote settings service add the given settings to a new
 * connection.  The connection is not written to disk, which may be done at
 * a later time by calling the connection's nm_remote_connection_commit_changes()
 * method.
 *
 * Returns: %TRUE if the request was successful, %FALSE if it failed
 *
 * Since: 0.9.10
 **/
gboolean
nm_remote_settings_add_connection_unsaved (NMRemoteSettings *settings,
                                           NMConnection *connection,
                                           NMRemoteSettingsAddConnectionFunc callback,
                                           gpointer user_data)
{
	NMRemoteSettingsPrivate *priv;
	AddConnectionInfo *info;
	GHashTable *new_settings;

	g_return_val_if_fail (NM_IS_REMOTE_SETTINGS (settings), FALSE);
	g_return_val_if_fail (NM_IS_CONNECTION (connection), FALSE);
	g_return_val_if_fail (callback != NULL, FALSE);

	priv = NM_REMOTE_SETTINGS_GET_PRIVATE (settings);

	_nm_remote_settings_ensure_inited (settings);

	if (!priv->service_running)
		return FALSE;

	/* Same flow as nm_remote_settings_add_connection(), but the daemon keeps
	 * the new connection in memory only ("AddConnectionUnsaved"). */
	info = g_malloc0 (sizeof (AddConnectionInfo));
	info->self = settings;
	info->callback = callback;
	info->callback_data = user_data;

	new_settings = nm_connection_to_hash (connection, NM_SETTING_HASH_FLAG_ALL);
	dbus_g_proxy_begin_call (priv->proxy, "AddConnectionUnsaved",
	                         add_connection_done,
	                         info,
	                         NULL,
	                         DBUS_TYPE_G_MAP_OF_MAP_OF_VARIANT, new_settings,
	                         G_TYPE_INVALID);
	g_hash_table_destroy (new_settings);

	priv->add_list = g_slist_append (priv->add_list, info);

	return TRUE;
}

/**
 * nm_remote_settings_load_connections:
 * @settings: the %NMRemoteSettings
 * @filenames: %NULL-terminated array of filenames to load
 * @failures: (out) (transfer full): on return, a %NULL-terminated array of
 *   filenames that failed to load
 * @error: return location for #GError
 *
 * Requests that the remote settings service load or reload the given files,
 * adding or updating the connections described within.
 *
 * The changes to the indicated files will not yet be reflected in
 * @settings's connections array when the function returns.
 *
 * If all of the indicated files were successfully loaded, the
 * function will return %TRUE, and @failures will be set to %NULL. If
 * NetworkManager tried to load the files, but some (or all) failed,
 * then @failures will be set to a %NULL-terminated array of the
 * filenames that failed to load.
 *
 * Returns: %TRUE if NetworkManager at least tried to load @filenames,
 * %FALSE if an error occurred (eg, permission denied).
 *
 * Since: 0.9.10
 **/
gboolean
nm_remote_settings_load_connections (NMRemoteSettings *settings,
                                     char **filenames,
                                     char ***failures,
                                     GError **error)
{
	NMRemoteSettingsPrivate *priv;
	char **my_failures = NULL;
	gboolean ret;

	g_return_val_if_fail (NM_IS_REMOTE_SETTINGS (settings), FALSE);
	g_return_val_if_fail (filenames != NULL, FALSE);

	priv = NM_REMOTE_SETTINGS_GET_PRIVATE (settings);

	_nm_remote_settings_ensure_inited (settings);

	if (!priv->service_running) {
		g_set_error_literal (error, NM_REMOTE_SETTINGS_ERROR,
		                     NM_REMOTE_SETTINGS_ERROR_SERVICE_UNAVAILABLE,
		                     "NetworkManager is not running.");
		return FALSE;
	}

	/* Synchronous (blocking) D-Bus call */
	if (!dbus_g_proxy_call (priv->proxy, "LoadConnections", error,
	                        G_TYPE_STRV, filenames,
	                        G_TYPE_INVALID,
	                        G_TYPE_BOOLEAN, &ret,
	                        G_TYPE_STRV, &my_failures,
	                        G_TYPE_INVALID))
		ret = FALSE;

	if (failures) {
		/* An empty failure array is normalized to NULL for the caller */
		if (my_failures && !*my_failures)
			g_clear_pointer (&my_failures, g_free);
		*failures = my_failures;
	} else
		g_strfreev (my_failures);

	return ret;
}

/**
 * nm_remote_settings_reload_connections:
 * @settings: the #NMRemoteSettings
 * @error: return location for #GError
 *
 * Requests that the remote settings service reload all connection
 * files from disk, adding, updating, and removing connections until
 * the in-memory state matches the on-disk state.
 *
 * Return value: %TRUE on success, %FALSE on failure
 *
 * Since: 0.9.10
 **/
gboolean
nm_remote_settings_reload_connections (NMRemoteSettings *settings,
                                       GError **error)
{
	NMRemoteSettingsPrivate *priv;
	gboolean success;

	g_return_val_if_fail (NM_IS_REMOTE_SETTINGS (settings), FALSE);

	priv = NM_REMOTE_SETTINGS_GET_PRIVATE (settings);

	_nm_remote_settings_ensure_inited (settings);

	if (!priv->service_running) {
		g_set_error_literal (error, NM_REMOTE_SETTINGS_ERROR,
		                     NM_REMOTE_SETTINGS_ERROR_SERVICE_UNAVAILABLE,
		                     "NetworkManager is not running.");
		return FALSE;
	}

	/* Synchronous (blocking) D-Bus call */
	if (!dbus_g_proxy_call (priv->proxy, "ReloadConnections", error,
	                        G_TYPE_INVALID,
	                        G_TYPE_BOOLEAN, &success,
	                        G_TYPE_INVALID))
		return FALSE;

	return success;
}

/* Emits NM_REMOTE_CONNECTION_REMOVED for every connection in @table and then
 * empties it; used when the settings service goes away. */
static void
clear_one_hash (GHashTable *table)
{
	GHashTableIter iter;
	gpointer value;
	GSList *list = NULL, *list_iter;

	/* Build up the list of connections; we can't emit "removed" during hash
	 * table iteration because emission of the "removed" signal may trigger code
	 * that explicitly removes the connection from the hash table somewhere
	 * else.
	 */
	g_hash_table_iter_init (&iter, table);
	while (g_hash_table_iter_next (&iter, NULL, &value))
		list = g_slist_prepend (list, NM_REMOTE_CONNECTION (value));

	for (list_iter = list; list_iter; list_iter = g_slist_next (list_iter))
		g_signal_emit_by_name (NM_REMOTE_CONNECTION (list_iter->data), NM_REMOTE_CONNECTION_REMOVED);
	g_slist_free (list);

	g_hash_table_remove_all (table);
}

/* Context for an in-flight SaveHostname request; freed by dbus-glib via the
 * g_free destroy-notify passed to dbus_g_proxy_begin_call(). */
typedef struct {
	NMRemoteSettings *settings;
	NMRemoteSettingsSaveHostnameFunc callback;
	gpointer callback_data;
} SaveHostnameInfo;

/* Completion of the SaveHostname D-Bus call: forwards the result (or error)
 * to the user's callback. */
static void
save_hostname_cb (DBusGProxy *proxy,
                  DBusGProxyCall *call,
                  gpointer user_data)
{
	SaveHostnameInfo *info = user_data;
	GError *error = NULL;

	dbus_g_proxy_end_call (proxy, call, &error, G_TYPE_INVALID);
	if (info->callback != NULL)
		info->callback (info->settings, error, info->callback_data);
	g_clear_error (&error);
}

/**
 * nm_remote_settings_save_hostname:
 * @settings: the %NMRemoteSettings
 * @hostname: the new persistent hostname to set, or %NULL to clear any existing
 *  persistent hostname
 * @callback: (scope async) (allow-none): callback to be called when the
 * hostname operation completes
 * @user_data: (closure): caller-specific data passed to @callback
 *
 * Requests that the machine's persistent hostname be set to the specified value
 * or cleared.
 *
 * Returns: %TRUE if the request was successful, %FALSE if it failed
 **/
gboolean
nm_remote_settings_save_hostname (NMRemoteSettings *settings,
                                  const char *hostname,
                                  NMRemoteSettingsSaveHostnameFunc callback,
                                  gpointer user_data)
{
	NMRemoteSettingsPrivate *priv;
	SaveHostnameInfo *info;

	g_return_val_if_fail (NM_IS_REMOTE_SETTINGS (settings), FALSE);
	/* NOTE(review): the doc comment says @hostname may be %NULL (to clear the
	 * hostname) and the call below handles NULL, but this precondition rejects
	 * NULL — the two contradict each other; confirm intended contract. */
	g_return_val_if_fail (hostname != NULL, FALSE);
	/* NOTE(review): @callback is documented (allow-none) but rejected here;
	 * same contradiction as above. */
	g_return_val_if_fail (callback != NULL, FALSE);

	priv = NM_REMOTE_SETTINGS_GET_PRIVATE (settings);

	_nm_remote_settings_ensure_inited (settings);

	if (!priv->service_running)
		return FALSE;

	info = g_malloc0 (sizeof (SaveHostnameInfo));
	info->settings = settings;
	info->callback = callback;
	info->callback_data = user_data;

	dbus_g_proxy_begin_call (priv->proxy, "SaveHostname",
	                         save_hostname_cb,
	                         info,
	                         g_free,
	                         G_TYPE_STRING, hostname ? hostname : "",
	                         G_TYPE_INVALID);
	return TRUE;
}

/* Handler for the Settings interface's PropertiesChanged signal (also invoked
 * directly with a full property map after GetAll): updates the cached
 * Hostname / CanModify values and notifies the corresponding GObject
 * properties. */
static void
properties_changed_cb (DBusGProxy *proxy,
                       GHashTable *properties,
                       gpointer user_data)
{
	NMRemoteSettings *self = NM_REMOTE_SETTINGS (user_data);
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (self);
	GHashTableIter iter;
	gpointer key, tmp;

	g_hash_table_iter_init (&iter, properties);
	while (g_hash_table_iter_next (&iter, &key, &tmp)) {
		GValue *value = tmp;

		if (!strcmp ((const char *) key, "Hostname")) {
			g_free (priv->hostname);
			priv->hostname = g_value_dup_string (value);
			g_object_notify (G_OBJECT (self), NM_REMOTE_SETTINGS_HOSTNAME);
		}

		if (!strcmp ((const char *) key, "CanModify")) {
			priv->can_modify = g_value_get_boolean (value);
			g_object_notify (G_OBJECT (self), NM_REMOTE_SETTINGS_CAN_MODIFY);
		}
	}
}

/* Completion of the GetAll properties call issued when NetworkManager appears
 * on the bus (see name_owner_changed): feeds the full property map through
 * properties_changed_cb.  Errors are deliberately ignored (NULL GError). */
static void
nm_appeared_got_properties (DBusGProxy *proxy,
                            DBusGProxyCall *call,
                            gpointer user_data)
{
	NMRemoteSettings *self = NM_REMOTE_SETTINGS (user_data);
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (self);
	GHashTable *props = NULL;

	if (dbus_g_proxy_end_call (proxy, call, NULL,
	                           DBUS_TYPE_G_MAP_OF_VARIANT, &props,
	                           G_TYPE_INVALID)) {
		properties_changed_cb (priv->props_proxy, props, self);
		g_hash_table_destroy (props);
	}
}

/* Handler for the bus daemon's NameOwnerChanged signal: tracks whether the
 * NetworkManager service is on the bus.  On appearance it refetches the
 * connection list and properties; on disappearance it flushes all cached
 * connections and properties and cancels any pending ListConnections call. */
static void
name_owner_changed (DBusGProxy *proxy,
                    const char *name,
                    const char *old_owner,
                    const char *new_owner,
                    gpointer user_data)
{
	NMRemoteSettings *self = NM_REMOTE_SETTINGS (user_data);
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (self);
	const char *sname = NM_DBUS_SERVICE;

	if (!strcmp (name, sname)) {
		if (new_owner && strlen (new_owner) > 0) {
			priv->service_running = TRUE;

			priv->listcon_call = dbus_g_proxy_begin_call (priv->proxy, "ListConnections",
			                                              fetch_connections_done, self, NULL,
			                                              G_TYPE_INVALID);

			dbus_g_proxy_begin_call (priv->props_proxy, "GetAll",
			                         nm_appeared_got_properties, self, NULL,
			                         G_TYPE_STRING, NM_DBUS_IFACE_SETTINGS,
			                         G_TYPE_INVALID);
		} else {
			priv->service_running = FALSE;

			clear_one_hash (priv->pending);
			clear_one_hash (priv->connections);

			/* Clear properties */
			g_free (priv->hostname);
			priv->hostname = NULL;
			g_object_notify (G_OBJECT (self), NM_REMOTE_SETTINGS_HOSTNAME);

			priv->can_modify = FALSE;
			g_object_notify (G_OBJECT (self), NM_REMOTE_SETTINGS_CAN_MODIFY);

			if (priv->listcon_call) {
				dbus_g_proxy_cancel_call (priv->proxy, priv->listcon_call);
				priv->listcon_call = NULL;
			}
		}
		g_object_notify (G_OBJECT (self), NM_REMOTE_SETTINGS_SERVICE_RUNNING);
	}
}

/****************************************************************/

/**
 * nm_remote_settings_new:
 * @bus: (allow-none): a valid and connected D-Bus connection
 *
 * Creates a new object representing the remote settings service.
 *
 * Note that this will do blocking D-Bus calls to initialize the
 * settings object. You can use nm_remote_settings_new_async() if you
 * want to avoid that.
 *
 * Returns: the new remote settings object on success, or %NULL on failure
 **/
NMRemoteSettings *
nm_remote_settings_new (DBusGConnection *bus)
{
	NMRemoteSettings *self;

	self = g_object_new (NM_TYPE_REMOTE_SETTINGS, NM_REMOTE_SETTINGS_BUS, bus, NULL);
	_nm_remote_settings_ensure_inited (self);
	return self;
}

/* Completion of the async initialization started by
 * nm_remote_settings_new_async(): stores the settings object (or the error)
 * in the GSimpleAsyncResult and completes it. */
static void
remote_settings_inited (GObject *source,
                        GAsyncResult *result,
                        gpointer user_data)
{
	GSimpleAsyncResult *simple = user_data;
	GError *error = NULL;

	if (!g_async_initable_init_finish (G_ASYNC_INITABLE (source), result, &error))
		g_simple_async_result_take_error (simple, error);
	else
		g_simple_async_result_set_op_res_gpointer (simple, source, g_object_unref);
	g_simple_async_result_complete (simple);
	g_object_unref (simple);
}

/**
 * nm_remote_settings_new_async:
 * @bus: (allow-none): a valid and connected D-Bus connection
 * @cancellable: a #GCancellable, or %NULL
 * @callback: callback to call when the settings object is created
 * @user_data: data for @callback
 *
 * Creates a new object representing the remote settings service and
 * begins asynchronously initializing it. @callback will be called
 * when it is done; use nm_remote_settings_new_finish() to get the
 * result.
 **/
void
nm_remote_settings_new_async (DBusGConnection *bus, GCancellable *cancellable,
                              GAsyncReadyCallback callback, gpointer user_data)
{
	NMRemoteSettings *self;
	GSimpleAsyncResult *simple;

	simple = g_simple_async_result_new (NULL, callback, user_data, nm_remote_settings_new_async);

	self = g_object_new (NM_TYPE_REMOTE_SETTINGS,
	                     NM_REMOTE_SETTINGS_BUS, bus,
	                     NULL);
	g_async_initable_init_async (G_ASYNC_INITABLE (self), G_PRIORITY_DEFAULT,
	                             cancellable, remote_settings_inited, simple);
}

/**
 * nm_remote_settings_new_finish:
 * @result: a #GAsyncResult
 * @error: location for a #GError, or %NULL
 *
 * Gets the result of an nm_remote_settings_new_async() call.
 *
 * Returns: a new #NMRemoteSettings object, or %NULL on error
 **/
NMRemoteSettings *
nm_remote_settings_new_finish (GAsyncResult *result, GError **error)
{
	GSimpleAsyncResult *simple;

	g_return_val_if_fail (g_simple_async_result_is_valid (result, NULL, nm_remote_settings_new_async), NULL);

	simple = G_SIMPLE_ASYNC_RESULT (result);
	if (g_simple_async_result_propagate_error (simple, error))
		return NULL;
	else
		return g_object_ref (g_simple_async_result_get_op_res_gpointer (simple));
}

/* GDestroyNotify for the connection hash tables: detaches our signal handlers
 * before dropping the hash's reference. */
static void
forget_connection (gpointer user_data)
{
	NMRemoteConnection *remote = NM_REMOTE_CONNECTION (user_data);

	g_signal_handlers_disconnect_matched (remote, G_SIGNAL_MATCH_FUNC,
	                                      0, 0, NULL, connection_removed_cb, NULL);
	g_signal_handlers_disconnect_matched (remote, G_SIGNAL_MATCH_FUNC,
	                                      0, 0, NULL, connection_visible_cb, NULL);
	g_object_unref (remote);
}

/* Instance init: creates the visible ('connections') and hidden ('pending')
 * hashes, both keyed by D-Bus path and owning a ref on each connection. */
static void
nm_remote_settings_init (NMRemoteSettings *self)
{
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (self);

	priv->connections = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, forget_connection);
	priv->pending = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, forget_connection);
}

/* GObject constructed: creates the D-Bus proxies (bus daemon — only for
 * shared buses, Settings interface, and Properties interface) and wires up
 * their signals.  Does not talk to the service yet; that happens in
 * init_sync()/init_async(). */
static void
constructed (GObject *object)
{
	NMRemoteSettingsPrivate *priv;

	priv = NM_REMOTE_SETTINGS_GET_PRIVATE (object);

	if (priv->private_bus == FALSE) {
		/* D-Bus proxy for clearing connections on NameOwnerChanged */
		priv->dbus_proxy = dbus_g_proxy_new_for_name (priv->bus,
		                                              DBUS_SERVICE_DBUS,
		                                              DBUS_PATH_DBUS,
		                                              DBUS_INTERFACE_DBUS);
		g_assert (priv->dbus_proxy);

		dbus_g_object_register_marshaller (g_cclosure_marshal_generic,
		                                   G_TYPE_NONE,
		                                   G_TYPE_STRING, G_TYPE_STRING, G_TYPE_STRING,
		                                   G_TYPE_INVALID);
		dbus_g_proxy_add_signal (priv->dbus_proxy, "NameOwnerChanged",
		                         G_TYPE_STRING, G_TYPE_STRING, G_TYPE_STRING,
		                         G_TYPE_INVALID);
		dbus_g_proxy_connect_signal (priv->dbus_proxy, "NameOwnerChanged",
		                             G_CALLBACK (name_owner_changed),
		                             object, NULL);
	}

	priv->proxy = _nm_dbus_new_proxy_for_connection (priv->bus,
	                                                 NM_DBUS_PATH_SETTINGS,
	                                                 NM_DBUS_IFACE_SETTINGS);
	g_assert (priv->proxy);
	/* Settings calls (e.g. AddConnection) may legitimately take a long time */
	dbus_g_proxy_set_default_timeout (priv->proxy, G_MAXINT);

	dbus_g_proxy_add_signal (priv->proxy, "NewConnection",
	                         DBUS_TYPE_G_OBJECT_PATH,
	                         G_TYPE_INVALID);
	dbus_g_proxy_connect_signal (priv->proxy, "NewConnection",
	                             G_CALLBACK (new_connection_cb),
	                             object,
	                             NULL);

	/* D-Bus properties proxy */
	priv->props_proxy = _nm_dbus_new_proxy_for_connection (priv->bus,
	                                                       NM_DBUS_PATH_SETTINGS,
	                                                       "org.freedesktop.DBus.Properties");
	g_assert (priv->props_proxy);

	/* Monitor properties */
	dbus_g_object_register_marshaller (g_cclosure_marshal_generic,
	                                   G_TYPE_NONE,
	                                   DBUS_TYPE_G_MAP_OF_VARIANT,
	                                   G_TYPE_INVALID);
	/* NOTE(review): PropertiesChanged is added/connected on priv->proxy (the
	 * Settings interface proxy), not priv->props_proxy — confirm this matches
	 * the signal the daemon actually emits. */
	dbus_g_proxy_add_signal (priv->proxy, "PropertiesChanged",
	                         DBUS_TYPE_G_MAP_OF_VARIANT,
	                         G_TYPE_INVALID);
	dbus_g_proxy_connect_signal (priv->proxy, "PropertiesChanged",
	                             G_CALLBACK (properties_changed_cb),
	                             object,
	                             NULL);
}

/* GInitable.init implementation: blocking initialization.  Checks service
 * presence (shared bus only), kicks off the async connection fetch, and
 * synchronously reads all Settings properties. */
static gboolean
init_sync (GInitable *initable, GCancellable *cancellable, GError **error)
{
	NMRemoteSettings *settings = NM_REMOTE_SETTINGS (initable);
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (settings);
	GHashTable *props;

	if (priv->private_bus == FALSE) {
		if (!dbus_g_proxy_call (priv->dbus_proxy, "NameHasOwner", error,
		                        G_TYPE_STRING, NM_DBUS_SERVICE,
		                        G_TYPE_INVALID,
		                        G_TYPE_BOOLEAN, &priv->service_running,
		                        G_TYPE_INVALID)) {
			priv->service_running = FALSE;
			return FALSE;
		}

		/* If NM isn't running we'll grab properties from name_owner_changed()
		 * when it starts.
		 */
		if (!priv->service_running)
			return TRUE;
	} else
		priv->service_running = TRUE;

	priv->listcon_call = dbus_g_proxy_begin_call (priv->proxy, "ListConnections",
	                                              fetch_connections_done, NM_REMOTE_SETTINGS (initable), NULL,
	                                              G_TYPE_INVALID);

	/* Get properties */
	if (!dbus_g_proxy_call (priv->props_proxy, "GetAll", error,
	                        G_TYPE_STRING, NM_DBUS_IFACE_SETTINGS,
	                        G_TYPE_INVALID,
	                        DBUS_TYPE_G_MAP_OF_VARIANT, &props,
	                        G_TYPE_INVALID))
		return FALSE;
	properties_changed_cb (priv->props_proxy, props, settings);
	g_hash_table_destroy (props);

	return TRUE;
}

/* State carried through the async-init chain below; freed in
 * init_async_complete(). */
typedef struct {
	NMRemoteSettings *settings;
	GSimpleAsyncResult *result;
} NMRemoteSettingsInitData;

/* Final step of async init: marks the object inited, completes the result,
 * and frees the init data. */
static void
init_async_complete (NMRemoteSettingsInitData *init_data)
{
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (init_data->settings);

	priv->inited = TRUE;

	g_simple_async_result_complete (init_data->result);
	g_object_unref (init_data->result);
	g_slice_free (NMRemoteSettingsInitData, init_data);
}

/* One-shot connections-read handler used during async init: all connections
 * are loaded, so finish the init. */
static void
init_read_connections (NMRemoteSettings *settings, gpointer user_data)
{
	NMRemoteSettingsInitData *init_data = user_data;

	g_signal_handlers_disconnect_by_func (settings, G_CALLBACK (init_read_connections), user_data);

	init_async_complete (init_data);
}

/* Async-init step: GetAll properties finished.  Stores the result (success or
 * error), then starts the ListConnections fetch and waits for
 * connections-read before completing. */
static void
init_async_got_properties (DBusGProxy *proxy, DBusGProxyCall *call,
                           gpointer user_data)
{
	NMRemoteSettingsInitData *init_data = user_data;
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (init_data->settings);
	GHashTable *props;
	GError *error = NULL;

	if (dbus_g_proxy_end_call (proxy, call, &error,
	                           DBUS_TYPE_G_MAP_OF_VARIANT, &props,
	                           G_TYPE_INVALID)) {
		properties_changed_cb (priv->props_proxy,
		                       props,
		                       init_data->settings);
		g_hash_table_destroy (props);
		g_simple_async_result_set_op_res_gboolean (init_data->result, TRUE);
	} else
		g_simple_async_result_take_error (init_data->result, error);

	/* Read connections and wait for the result */
	priv->listcon_call = dbus_g_proxy_begin_call (priv->proxy, "ListConnections",
	                                              fetch_connections_done, init_data->settings, NULL,
	                                              G_TYPE_INVALID);
	g_signal_connect (init_data->settings, "connections-read",
	                  G_CALLBACK (init_read_connections), init_data);
}

/* Async-init step: request all Settings properties. */
static void
init_get_properties (NMRemoteSettingsInitData *init_data)
{
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (init_data->settings);

	dbus_g_proxy_begin_call (priv->props_proxy, "GetAll",
	                         init_async_got_properties, init_data, NULL,
	                         G_TYPE_STRING, NM_DBUS_IFACE_SETTINGS,
	                         G_TYPE_INVALID);
}

/* Async-init step: NameHasOwner finished.  If NM isn't running, succeed
 * immediately (properties arrive later via name_owner_changed); otherwise
 * continue with the property fetch. */
static void
init_async_got_manager_running (DBusGProxy *proxy, DBusGProxyCall *call,
                                gpointer user_data)
{
	NMRemoteSettingsInitData *init_data = user_data;
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (init_data->settings);
	GError *error = NULL;

	if (!dbus_g_proxy_end_call (proxy, call, &error,
	                            G_TYPE_BOOLEAN, &priv->service_running,
	                            G_TYPE_INVALID)) {
		g_simple_async_result_take_error (init_data->result, error);
		init_async_complete (init_data);
		return;
	}

	if (!priv->service_running) {
		g_simple_async_result_set_op_res_gboolean (init_data->result, TRUE);
		init_async_complete (init_data);
		return;
	}

	init_get_properties (init_data);
}

/* GAsyncInitable.init_async implementation: entry point of the async-init
 * chain (NameHasOwner -> GetAll -> ListConnections -> connections-read). */
static void
init_async (GAsyncInitable *initable, int io_priority,
            GCancellable *cancellable, GAsyncReadyCallback callback,
            gpointer user_data)
{
	NMRemoteSettingsInitData *init_data;
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (initable);

	init_data = g_slice_new0 (NMRemoteSettingsInitData);
	init_data->settings = NM_REMOTE_SETTINGS (initable);
	init_data->result = g_simple_async_result_new (G_OBJECT (initable), callback,
	                                               user_data, init_async);

	if (priv->private_bus) {
		/* Private bus: no bus daemon, assume the service is there */
		priv->service_running = TRUE;
		init_get_properties (init_data);
	} else {
		/* Check if NM is running */
		dbus_g_proxy_begin_call (priv->dbus_proxy, "NameHasOwner",
		                         init_async_got_manager_running,
		                         init_data, NULL,
		                         G_TYPE_STRING, NM_DBUS_SERVICE,
		                         G_TYPE_INVALID);
	}
}

/* GAsyncInitable.init_finish implementation: just propagates any stored error. */
static gboolean
init_finish (GAsyncInitable *initable, GAsyncResult *result, GError **error)
{
	GSimpleAsyncResult *simple = G_SIMPLE_ASYNC_RESULT (result);

	if (g_simple_async_result_propagate_error (simple, error))
		return FALSE;
	else
		return TRUE;
}

/* GObject dispose: fails any outstanding AddConnection requests, releases
 * both connection hashes, cached properties, proxies, and the bus ref. */
static void
dispose (GObject *object)
{
	NMRemoteSettings *self = NM_REMOTE_SETTINGS (object);
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (self);

	/* Each dispose removes the list head, so this drains the list */
	while (g_slist_length (priv->add_list))
		add_connection_info_dispose (self, (AddConnectionInfo *) priv->add_list->data);

	if (priv->connections) {
		g_hash_table_destroy (priv->connections);
		priv->connections = NULL;
	}

	if (priv->pending) {
		g_hash_table_destroy (priv->pending);
		priv->pending = NULL;
	}

	g_free (priv->hostname);
	priv->hostname = NULL;

	g_clear_object (&priv->dbus_proxy);
	g_clear_object (&priv->proxy);
	g_clear_object (&priv->props_proxy);

	if (priv->bus) {
		dbus_g_connection_unref (priv->bus);
		priv->bus = NULL;
	}

	G_OBJECT_CLASS (nm_remote_settings_parent_class)->dispose (object);
}

/* GObject set_property: only the construct-only "bus" property; falls back to
 * a new default connection when none is supplied. */
static void
set_property (GObject *object, guint prop_id,
              const GValue *value, GParamSpec *pspec)
{
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (object);

	switch (prop_id) {
	case PROP_BUS:
		/* Construct only */
		priv->bus = g_value_dup_boxed (value);
		if (!priv->bus) {
			priv->bus = _nm_dbus_new_connection (NULL);
			priv->private_bus = _nm_dbus_is_connection_private (priv->bus);
		}
		break;
	default:
		G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
		break;
	}
}

/* GObject get_property: forces (blocking) initialization first so the
 * returned values reflect the service's actual state. */
static void
get_property (GObject *object, guint prop_id,
              GValue *value, GParamSpec *pspec)
{
	NMRemoteSettingsPrivate *priv = NM_REMOTE_SETTINGS_GET_PRIVATE (object);

	_nm_remote_settings_ensure_inited (NM_REMOTE_SETTINGS (object));

	switch (prop_id) {
	case PROP_BUS:
		g_value_set_boxed (value, priv->bus);
		break;
	case PROP_SERVICE_RUNNING:
		g_value_set_boolean (value, priv->service_running);
		break;
	case PROP_HOSTNAME:
		g_value_set_string (value, priv->hostname);
		break;
	case PROP_CAN_MODIFY:
		g_value_set_boolean (value, priv->can_modify);
		break;
	default:
		G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
		break;
	}
}

/* Class init: installs vfuncs, properties, and signals. */
static void
nm_remote_settings_class_init (NMRemoteSettingsClass *class)
{
	GObjectClass *object_class = G_OBJECT_CLASS (class);

	g_type_class_add_private (class, sizeof (NMRemoteSettingsPrivate));

	/* Virtual methods */
	object_class->constructed = constructed;
	object_class->set_property = set_property;
	object_class->get_property = get_property;
	object_class->dispose = dispose;

	/* Properties */

	/**
	 * NMRemoteSettings:bus:
	 *
	 * The #DBusGConnection that the #NMRemoteSettings is connected to. Defaults
	 * to the system bus if not specified.
	 */
	g_object_class_install_property
		(object_class, PROP_BUS,
		 g_param_spec_boxed (NM_REMOTE_SETTINGS_BUS, "", "",
		                     DBUS_TYPE_G_CONNECTION,
		                     G_PARAM_READWRITE |
		                     G_PARAM_CONSTRUCT_ONLY |
		                     G_PARAM_STATIC_STRINGS));

	/**
	 * NMRemoteSettings:service-running:
	 *
	 * Whether the settings service is running.
	 */
	g_object_class_install_property
		(object_class, PROP_SERVICE_RUNNING,
		 g_param_spec_boolean (NM_REMOTE_SETTINGS_SERVICE_RUNNING, "", "",
		                       FALSE,
		                       G_PARAM_READABLE |
		                       G_PARAM_STATIC_STRINGS));

	/**
	 * NMRemoteSettings:hostname:
	 *
	 * The machine hostname stored in persistent configuration. This can be
	 * modified by calling nm_remote_settings_save_hostname().
	 */
	g_object_class_install_property
		(object_class, PROP_HOSTNAME,
		 g_param_spec_string (NM_REMOTE_SETTINGS_HOSTNAME, "", "",
		                      NULL,
		                      G_PARAM_READABLE |
		                      G_PARAM_STATIC_STRINGS));

	/**
	 * NMRemoteSettings:can-modify:
	 *
	 * If %TRUE, adding and modifying connections is supported.
	 */
	g_object_class_install_property
		(object_class, PROP_CAN_MODIFY,
		 g_param_spec_boolean (NM_REMOTE_SETTINGS_CAN_MODIFY, "", "",
		                       FALSE,
		                       G_PARAM_READABLE |
		                       G_PARAM_STATIC_STRINGS));

	/* Signals */
	signals[NEW_CONNECTION] =
		g_signal_new (NM_REMOTE_SETTINGS_NEW_CONNECTION,
		              G_OBJECT_CLASS_TYPE (object_class),
		              G_SIGNAL_RUN_FIRST,
		              G_STRUCT_OFFSET (NMRemoteSettingsClass, new_connection),
		              NULL, NULL, NULL,
		              G_TYPE_NONE, 1, G_TYPE_OBJECT);

	signals[CONNECTIONS_READ] =
		g_signal_new (NM_REMOTE_SETTINGS_CONNECTIONS_READ,
		              G_OBJECT_CLASS_TYPE (object_class),
		              G_SIGNAL_RUN_FIRST,
		              G_STRUCT_OFFSET (NMRemoteSettingsClass, connections_read),
		              NULL, NULL, NULL,
		              G_TYPE_NONE, 0);
}

/* GInitable interface: blocking init entry point. */
static void
nm_remote_settings_initable_iface_init (GInitableIface *iface)
{
	iface->init = init_sync;
}

/* GAsyncInitable interface: async init entry points. */
static void
nm_remote_settings_async_initable_iface_init (GAsyncInitableIface *iface)
{
	iface->init_async = init_async;
	iface->init_finish = init_finish;
}
sujithshankar/NetworkManager
libnm-glib/nm-remote-settings.c
C
gpl-2.0
52,031
/* * Copyright (C) 1996-2016 The Squid Software Foundation and contributors * * Squid software is distributed under GPLv2+ license and includes * contributions from numerous individuals and organizations. * Please see the COPYING and CONTRIBUTORS files for details. */ #ifndef SQUID_LOADABLE_MODULES_H #define SQUID_LOADABLE_MODULES_H // TODO: add reporting for cachemgr // TODO: add reconfiguration support class wordlist; void LoadableModulesConfigure(const wordlist *names); #endif /* SQUID_LOADABLE_MODULES_H */
saucelabs/squid3
src/LoadableModules.h
C
gpl-2.0
527
<?php /** * Rhythm Categories * * @package Rhythm */ class WP_Rhythm_Categories_Widget extends WP_Widget { function __construct() { $widget_ops = array('classname' => 'widget_rhythm_categories', 'description' => __( "Displays list of categories", 'rhythm-addons' ) ); parent::__construct('rhythm-categories', __( 'Rhythm Categories', 'rhythm-addons' ), $widget_ops); $this-> alt_option_name = 'widget_rhythm_categories'; add_action( 'save_post', array(&$this, 'flush_widget_cache') ); add_action( 'deleted_post', array(&$this, 'flush_widget_cache') ); add_action( 'switch_theme', array(&$this, 'flush_widget_cache') ); } function widget($args, $instance) { global $post; $cache = wp_cache_get('widget_rhythm_categories', 'widget'); if ( !is_array($cache) ) { $cache = array(); } if ( ! isset( $args['widget_id'] ) ) { $args['widget_id'] = $this->id; } if ( isset( $cache[ $args['widget_id'] ] ) ) { echo $cache[ $args['widget_id'] ]; return; } ob_start(); extract($args); echo $before_widget; $title = apply_filters('widget_title', $instance['title'], $instance, $this->id_base); if ($title): echo $before_title.esc_html($title).$after_title; endif; $cat_args = array( 'type' => 'post', 'child_of' => 0, 'parent' => '', 'orderby' => 'name', 'order' => 'ASC', 'hide_empty' => 0, 'hierarchical' => 0, 'taxonomy' => 'category', ); $categories = get_categories( $cat_args ); if (is_array($categories) && !is_wp_error($categories)) { $cat_per_row = ceil(count($categories) / 2); ?> <div class="row"> <?php for ($i = 0; $i < 2; $i++) { ?> <div class="col-md-6"> <ul class="clearlist widget-menu"> <?php $k = 0; foreach ($categories as $category) { $k ++; if ( ($i == 0 && $k > $cat_per_row) || ($i == 1 && $k <= $cat_per_row) ) { continue; } ?> <li> <a href="<?php echo esc_url(get_category_link($category -> term_id)); ?>" title="<?php echo esc_attr($category -> name); ?>"><?php echo esc_html($category -> name); ?></a> <small> - <?php echo intval($category -> category_count); ?> </small> </li> <?php } 
?> </ul> </div> <?php } ?> </div> <?php } echo $after_widget; $cache[$args['widget_id']] = ob_get_flush(); wp_cache_set('widget_rhythm_categories', $cache, 'widget'); } function update( $new_instance, $old_instance ) { $instance = $old_instance; $instance['title'] = strip_tags($new_instance['title']); $this->flush_widget_cache(); $alloptions = wp_cache_get( 'alloptions', 'options' ); if ( isset($alloptions['widget_rhythm_categories']) ) { delete_option('widget_rhythm_categories'); } return $instance; } function flush_widget_cache() { wp_cache_delete('widget_rhythm_categories', 'widget'); } function form( $instance ) { $title = isset($instance['title']) ? $instance['title'] : ''; ?> <p><label for="<?php echo esc_attr($this->get_field_id('title')); ?>"><?php _e( 'Title:', 'rhythm-addons' ); ?></label> <input class="widefat" id="<?php echo esc_attr($this->get_field_id('title')); ?>" name="<?php echo esc_attr($this->get_field_name('title')); ?>" type="text" value="<?php echo esc_attr($title); ?>" /></p> <?php } }
carlos09/fredrixprint
wp-content/themes/rhythm/plugins/rhythm-addons/widgets/WP_Rhythm_Categories_Widget.class.php
PHP
gpl-2.0
3,846
/*************************************************************************** qgsoffscreen3dengine.cpp -------------------------------------- Date : July 2018 Copyright : (C) 2018 by Martin Dobias Email : wonder dot sk at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ #include "qgsoffscreen3dengine.h" #include <QOffscreenSurface> #include <QSurfaceFormat> #include <Qt3DCore/QAspectEngine> #include <Qt3DLogic/QLogicAspect> #include <Qt3DRender/QCamera> #include <Qt3DRender/QCameraSelector> #include <Qt3DRender/QClearBuffers> #include <Qt3DRender/QRenderAspect> #include <Qt3DRender/QRenderCapture> #include <Qt3DRender/QRenderSettings> #include <Qt3DRender/QRenderTarget> #include <Qt3DRender/QRenderTargetOutput> #include <Qt3DRender/QRenderTargetSelector> #include <Qt3DRender/QRenderSurfaceSelector> #include <Qt3DRender/QTexture> #include <Qt3DRender/QViewport> QgsOffscreen3DEngine::QgsOffscreen3DEngine() { // Set up the default OpenGL surface format. QSurfaceFormat format; format.setDepthBufferSize( 32 ); // TODO: or 24? (used by QWindow3D) format.setSamples( 8 ); QSurfaceFormat::setDefaultFormat( format ); // Set up a camera to point at the shapes. mCamera = new Qt3DRender::QCamera; mCamera->lens()->setPerspectiveProjection( 45.0f, float( mSize.width() ) / float( mSize.height() ), 0.1f, 1000.0f ); mCamera->setPosition( QVector3D( 0, 0, 20.0f ) ); mCamera->setUpVector( QVector3D( 0, 1, 0 ) ); mCamera->setViewCenter( QVector3D( 0, 0, 0 ) ); // Set up the engine and the aspects that we want to use. 
mAspectEngine = new Qt3DCore::QAspectEngine(); mRenderAspect = new Qt3DRender::QRenderAspect( Qt3DRender::QRenderAspect::Threaded ); // Only threaded mode seems to work right now. mLogicAspect = new Qt3DLogic::QLogicAspect(); mAspectEngine->registerAspect( mRenderAspect ); mAspectEngine->registerAspect( mLogicAspect ); // Create the root entity of the engine. // This is not the same as the 3D scene root: the QRenderSettings // component must be held by the root of the QEntity tree, // so it is added to this one. The 3D scene is added as a subtree later, // in setRootEntity(). mRoot = new Qt3DCore::QEntity(); mRenderSettings = new Qt3DRender::QRenderSettings( mRoot ); mRoot->addComponent( mRenderSettings ); mCamera->setParent( mRoot ); // Create the offscreen frame graph, which will manage all of the resources required // for rendering without a QWindow. createFrameGraph(); // Set this frame graph to be in use. // the render settings also sets itself as the parent of mSurfaceSelector mRenderSettings->setActiveFrameGraph( mSurfaceSelector ); // Set the root entity of the engine. This causes the engine to begin running. 
mAspectEngine->setRootEntity( Qt3DCore::QEntityPtr( mRoot ) ); } QgsOffscreen3DEngine::~QgsOffscreen3DEngine() { delete mAspectEngine; delete mOffscreenSurface; } void QgsOffscreen3DEngine::setSize( const QSize &s ) { mSize = s; mTexture->setSize( mSize.width(), mSize.height() ); mDepthTexture->setSize( mSize.width(), mSize.height() ); mSurfaceSelector->setExternalRenderTargetSize( mSize ); mCamera->setAspectRatio( float( mSize.width() ) / float( mSize.height() ) ); } void QgsOffscreen3DEngine::setClearColor( const QColor &color ) { mClearBuffers->setClearColor( color ); } void QgsOffscreen3DEngine::setFrustumCullingEnabled( bool enabled ) { // TODO Q_UNUSED( enabled ); } void QgsOffscreen3DEngine::createRenderTarget() { mTextureTarget = new Qt3DRender::QRenderTarget; // The lifetime of the objects created here is managed // automatically, as they become children of this object. // Create a render target output for rendering color. mTextureOutput = new Qt3DRender::QRenderTargetOutput( mTextureTarget ); mTextureOutput->setAttachmentPoint( Qt3DRender::QRenderTargetOutput::Color0 ); // Create a texture to render into. mTexture = new Qt3DRender::QTexture2D( mTextureOutput ); mTexture->setSize( mSize.width(), mSize.height() ); mTexture->setFormat( Qt3DRender::QAbstractTexture::RGB8_UNorm ); mTexture->setMinificationFilter( Qt3DRender::QAbstractTexture::Linear ); mTexture->setMagnificationFilter( Qt3DRender::QAbstractTexture::Linear ); // Hook the texture up to our output, and the output up to this object. 
mTextureOutput->setTexture( mTexture ); mTextureTarget->addOutput( mTextureOutput ); mDepthTextureOutput = new Qt3DRender::QRenderTargetOutput( mTextureTarget ); mDepthTextureOutput->setAttachmentPoint( Qt3DRender::QRenderTargetOutput::Depth ); mDepthTexture = new Qt3DRender::QTexture2D( mDepthTextureOutput ); mDepthTexture->setSize( mSize.width(), mSize.height() ); mDepthTexture->setFormat( Qt3DRender::QAbstractTexture::DepthFormat ); mDepthTexture->setMinificationFilter( Qt3DRender::QAbstractTexture::Linear ); mDepthTexture->setMagnificationFilter( Qt3DRender::QAbstractTexture::Linear ); mDepthTexture->setComparisonFunction( Qt3DRender::QAbstractTexture::CompareLessEqual ); mDepthTexture->setComparisonMode( Qt3DRender::QAbstractTexture::CompareRefToTexture ); // Hook up the depth texture mDepthTextureOutput->setTexture( mDepthTexture ); mTextureTarget->addOutput( mDepthTextureOutput ); } void QgsOffscreen3DEngine::createFrameGraph() { // Firstly, create the offscreen surface. This will take the place // of a QWindow, allowing us to render our scene without one. mOffscreenSurface = new QOffscreenSurface(); mOffscreenSurface->setFormat( QSurfaceFormat::defaultFormat() ); mOffscreenSurface->create(); // Hook it up to the frame graph. mSurfaceSelector = new Qt3DRender::QRenderSurfaceSelector( mRenderSettings ); mSurfaceSelector->setSurface( mOffscreenSurface ); mSurfaceSelector->setExternalRenderTargetSize( mSize ); // Create a texture to render into. This acts as the buffer that // holds the rendered image. mRenderTargetSelector = new Qt3DRender::QRenderTargetSelector( mSurfaceSelector ); createRenderTarget(); // the target selector also sets itself as the parent of mTextureTarget mRenderTargetSelector->setTarget( mTextureTarget ); // Create a node used for clearing the required buffers. 
mClearBuffers = new Qt3DRender::QClearBuffers( mRenderTargetSelector ); mClearBuffers->setClearColor( QColor( 100, 100, 100, 255 ) ); mClearBuffers->setBuffers( Qt3DRender::QClearBuffers::ColorDepthBuffer ); // Create a viewport node. The viewport here just covers the entire render area. mViewport = new Qt3DRender::QViewport( mRenderTargetSelector ); mViewport->setNormalizedRect( QRectF( 0.0, 0.0, 1.0, 1.0 ) ); // Create a camera selector node, and tell it to use the camera we've ben given. mCameraSelector = new Qt3DRender::QCameraSelector( mViewport ); mCameraSelector->setCamera( mCamera ); // Add a render capture node to the frame graph. // This is set as the next child of the render target selector node, // so that the capture will be taken from the specified render target // once all other rendering operations have taken place. mRenderCapture = new Qt3DRender::QRenderCapture( mRenderTargetSelector ); } void QgsOffscreen3DEngine::setRootEntity( Qt3DCore::QEntity *root ) { // Make sure any existing scene root is unparented. if ( mSceneRoot ) { mSceneRoot->setParent( static_cast<Qt3DCore::QNode *>( nullptr ) ); } // Parent the incoming scene root to our current root entity. mSceneRoot = root; mSceneRoot->setParent( mAspectEngine->rootEntity().data() ); } Qt3DRender::QRenderSettings *QgsOffscreen3DEngine::renderSettings() { return mRenderSettings; } Qt3DRender::QCamera *QgsOffscreen3DEngine::camera() { return mCamera; } QSize QgsOffscreen3DEngine::size() const { return mSize; } void QgsOffscreen3DEngine::requestCaptureImage() { if ( mReply ) { qDebug() << "already having a pending capture, skipping"; return; } mReply = mRenderCapture->requestCapture(); connect( mReply, &Qt3DRender::QRenderCaptureReply::completed, this, [ = ] { QImage image = mReply->image(); mReply->deleteLater(); mReply = nullptr; emit imageCaptured( image ); } ); }
raymondnijssen/QGIS
src/3d/qgsoffscreen3dengine.cpp
C++
gpl-2.0
8,804
require_dependency 'email' require_dependency 'email_token' require_dependency 'trust_level' require_dependency 'pbkdf2' require_dependency 'discourse' require_dependency 'post_destroyer' require_dependency 'user_name_suggester' require_dependency 'pretty_text' require_dependency 'url_helper' require_dependency 'letter_avatar' require_dependency 'promotion' class User < ActiveRecord::Base include Roleable include UrlHelper include HasCustomFields has_many :posts has_many :notifications, dependent: :destroy has_many :topic_users, dependent: :destroy has_many :topics has_many :user_open_ids, dependent: :destroy has_many :user_actions, dependent: :destroy has_many :post_actions, dependent: :destroy has_many :user_badges, -> {where('user_badges.badge_id IN (SELECT id FROM badges where enabled)')}, dependent: :destroy has_many :badges, through: :user_badges has_many :email_logs, dependent: :delete_all has_many :post_timings has_many :topic_allowed_users, dependent: :destroy has_many :topics_allowed, through: :topic_allowed_users, source: :topic has_many :email_tokens, dependent: :destroy has_many :views has_many :user_visits, dependent: :destroy has_many :invites, dependent: :destroy has_many :topic_links, dependent: :destroy has_many :uploads has_many :warnings has_one :user_avatar, dependent: :destroy has_one :facebook_user_info, dependent: :destroy has_one :twitter_user_info, dependent: :destroy has_one :github_user_info, dependent: :destroy has_one :google_user_info, dependent: :destroy has_one :oauth2_user_info, dependent: :destroy has_one :user_stat, dependent: :destroy has_one :user_profile, dependent: :destroy, inverse_of: :user has_one :single_sign_on_record, dependent: :destroy belongs_to :approved_by, class_name: 'User' belongs_to :primary_group, class_name: 'Group' has_many :group_users, dependent: :destroy has_many :groups, through: :group_users has_many :secure_categories, through: :groups, source: :categories has_many :group_managers, dependent: :destroy 
has_many :managed_groups, through: :group_managers, source: :group has_many :muted_user_records, class_name: 'MutedUser' has_many :muted_users, through: :muted_user_records has_one :user_search_data, dependent: :destroy has_one :api_key, dependent: :destroy belongs_to :uploaded_avatar, class_name: 'Upload' delegate :last_sent_email_address, :to => :email_logs before_validation :strip_downcase_email validates_presence_of :username validate :username_validator validates :email, presence: true, uniqueness: true validates :email, email: true, if: :email_changed? validate :password_validator validates :name, user_full_name: true, if: :name_changed? validates :ip_address, allowed_ip_address: {on: :create, message: :signup_not_allowed} after_initialize :add_trust_level after_initialize :set_default_email_digest after_initialize :set_default_external_links_in_new_tab after_create :create_email_token after_create :create_user_stat after_create :create_user_profile after_create :ensure_in_trust_level_group after_create :automatic_group_membership before_save :update_username_lower before_save :ensure_password_is_hashed after_save :update_tracked_topics after_save :clear_global_notice_if_needed after_save :refresh_avatar after_save :badge_grant before_destroy do # These tables don't have primary keys, so destroying them with activerecord is tricky: PostTiming.delete_all(user_id: self.id) TopicViewItem.delete_all(user_id: self.id) end # Whether we need to be sending a system message after creation attr_accessor :send_welcome_message # This is just used to pass some information into the serializer attr_accessor :notification_channel_position # set to true to optimize creation and save for imports attr_accessor :import_mode # excluding fake users like the system user or anonymous users scope :real, -> { where('id > 0').where('NOT EXISTS( SELECT 1 FROM user_custom_fields ucf WHERE ucf.user_id = users.id AND ucf.name = ? 
AND ucf.value::int > 0 )', 'master_id') } scope :staff, -> { where("admin OR moderator") } # TODO-PERF: There is no indexes on any of these # and NotifyMailingListSubscribers does a select-all-and-loop # may want to create an index on (active, blocked, suspended_till, mailing_list_mode)? scope :blocked, -> { where(blocked: true) } scope :not_blocked, -> { where(blocked: false) } scope :suspended, -> { where('suspended_till IS NOT NULL AND suspended_till > ?', Time.zone.now) } scope :not_suspended, -> { where('suspended_till IS NULL OR suspended_till <= ?', Time.zone.now) } scope :activated, -> { where(active: true) } module NewTopicDuration ALWAYS = -1 LAST_VISIT = -2 end def self.max_password_length 200 end def self.username_length SiteSetting.min_username_length.to_i..SiteSetting.max_username_length.to_i end def custom_groups groups.where(automatic: false, visible: true) end def self.username_available?(username) lower = username.downcase User.where(username_lower: lower).blank? end def effective_locale if SiteSetting.allow_user_locale && self.locale.present? self.locale else SiteSetting.default_locale end end EMAIL = %r{([^@]+)@([^\.]+)} def self.new_from_params(params) user = User.new user.name = params[:name] user.email = params[:email] user.password = params[:password] user.username = params[:username] user end def self.suggest_name(email) return "" unless email name = email.split(/[@\+]/)[0] name = name.gsub(".", " ") name.titleize end def self.find_by_username_or_email(username_or_email) if username_or_email.include?('@') find_by_email(username_or_email) else find_by_username(username_or_email) end end def self.find_by_email(email) find_by(email: Email.downcase(email)) end def self.find_by_username(username) find_by(username_lower: username.downcase) end def enqueue_welcome_message(message_type) return unless SiteSetting.send_welcome_message? 
Jobs.enqueue(:send_system_message, user_id: id, message_type: message_type) end def change_username(new_username, actor=nil) UsernameChanger.change(self, new_username, actor) end def created_topic_count stat = user_stat || create_user_stat stat.topic_count end alias_method :topic_count, :created_topic_count # tricky, we need our bus to be subscribed from the right spot def sync_notification_channel_position @unread_notifications_by_type = nil self.notification_channel_position = MessageBus.last_id("/notification/#{id}") end def invited_by used_invite = invites.where("redeemed_at is not null").includes(:invited_by).first used_invite.try(:invited_by) end # Approve this user def approve(approved_by, send_mail=true) self.approved = true if approved_by.is_a?(Fixnum) self.approved_by_id = approved_by else self.approved_by = approved_by end self.approved_at = Time.now send_approval_email if save and send_mail end def self.email_hash(email) Digest::MD5.hexdigest(email.strip.downcase) end def email_hash User.email_hash(email) end def reload @unread_notifications = nil @unread_total_notifications = nil @unread_pms = nil super end def unread_private_messages @unread_pms ||= begin # perf critical, much more efficient than AR sql = " SELECT COUNT(*) FROM notifications n LEFT JOIN topics t ON n.topic_id = t.id WHERE t.deleted_at IS NULL AND n.notification_type = :type AND n.user_id = :user_id AND NOT read" User.exec_sql(sql, user_id: id, type: Notification.types[:private_message]) .getvalue(0,0).to_i end end def unread_notifications @unread_notifications ||= begin # perf critical, much more efficient than AR sql = " SELECT COUNT(*) FROM notifications n LEFT JOIN topics t ON n.topic_id = t.id WHERE t.deleted_at IS NULL AND n.notification_type <> :pm AND n.user_id = :user_id AND NOT read AND n.id > :seen_notification_id" User.exec_sql(sql, user_id: id, seen_notification_id: seen_notification_id, pm: Notification.types[:private_message]) .getvalue(0,0).to_i end end def 
total_unread_notifications @unread_total_notifications ||= notifications.where("read = false").count end def saw_notification_id(notification_id) User.where("id = ? and seen_notification_id < ?", id, notification_id) .update_all ["seen_notification_id = ?", notification_id] # mark all badge notifications read Notification.where('user_id = ? AND NOT read AND notification_type = ?', id, Notification.types[:granted_badge]) .update_all ["read = ?", true] end def publish_notifications_state MessageBus.publish("/notification/#{id}", {unread_notifications: unread_notifications, unread_private_messages: unread_private_messages, total_unread_notifications: total_unread_notifications}, user_ids: [id] # only publish the notification to this user ) end # A selection of people to autocomplete on @mention def self.mentionable_usernames User.select(:username).order('last_posted_at desc').limit(20) end def password=(password) # special case for passwordless accounts @raw_password = password unless password.blank? end def password '' # so that validator doesn't complain that a password attribute doesn't exist end # Indicate that this is NOT a passwordless account for the purposes of validation def password_required! @password_required = true end def password_required? !!@password_required end def has_password? password_hash.present? end def password_validator PasswordValidator.new(attributes: :password).validate_each(self, :password, @raw_password) end def confirm_password?(password) return false unless password_hash && salt self.password_hash == hash_password(password, salt) end def first_day_user? !staff? && trust_level < TrustLevel[2] && created_at >= 24.hours.ago end def new_user? (created_at >= 24.hours.ago || trust_level == TrustLevel[0]) && trust_level < TrustLevel[2] && !staff? end def seen_before? last_seen_at.present? 
end def visit_record_for(date) user_visits.find_by(visited_at: date) end def update_visit_record!(date) create_visit_record!(date) unless visit_record_for(date) end def update_posts_read!(num_posts, now=Time.zone.now) if user_visit = visit_record_for(now.to_date) user_visit.posts_read += num_posts user_visit.save user_visit else create_visit_record!(now.to_date, num_posts) end end def update_ip_address!(new_ip_address) unless ip_address == new_ip_address || new_ip_address.blank? update_column(:ip_address, new_ip_address) end end def update_last_seen!(now=Time.zone.now) now_date = now.to_date # Only update last seen once every minute redis_key = "user:#{id}:#{now_date}" return unless $redis.setnx(redis_key, "1") $redis.expire(redis_key, SiteSetting.active_user_rate_limit_secs) update_previous_visit(now) # using update_column to avoid the AR transaction update_column(:last_seen_at, now) end def self.gravatar_template(email) email_hash = self.email_hash(email) "//www.gravatar.com/avatar/#{email_hash}.png?s={size}&r=pg&d=identicon" end # Don't pass this up to the client - it's meant for server side use # This is used in # - self oneboxes in open graph data # - emails def small_avatar_url avatar_template_url.gsub("{size}", "45") end def avatar_template_url schemaless absolute avatar_template end def self.avatar_template(username,uploaded_avatar_id) return letter_avatar_template(username) if !uploaded_avatar_id id = uploaded_avatar_id username ||= "" "#{Discourse.base_uri}/user_avatar/#{RailsMultisite::ConnectionManagement.current_hostname}/#{username.downcase}/{size}/#{id}.png" end def self.letter_avatar_template(username) "#{Discourse.base_uri}/letter_avatar/#{username.downcase}/{size}/#{LetterAvatar.version}.png" end def avatar_template self.class.avatar_template(username,uploaded_avatar_id) end # The following count methods are somewhat slow - definitely don't use them in a loop. 
# They might need to be denormalized def like_count UserAction.where(user_id: id, action_type: UserAction::WAS_LIKED).count end def like_given_count UserAction.where(user_id: id, action_type: UserAction::LIKE).count end def post_count stat = user_stat || create_user_stat stat.post_count end def flags_given_count PostAction.where(user_id: id, post_action_type_id: PostActionType.flag_types.values).count end def warnings_received_count warnings.count end def flags_received_count posts.includes(:post_actions).where('post_actions.post_action_type_id' => PostActionType.flag_types.values).count end def private_topics_count topics_allowed.where(archetype: Archetype.private_message).count end def posted_too_much_in_topic?(topic_id) # Does not apply to staff, non-new members or your own topics return false if staff? || (trust_level != TrustLevel[0]) || Topic.where(id: topic_id, user_id: id).exists? last_action_in_topic = UserAction.last_action_in_topic(id, topic_id) since_reply = Post.where(user_id: id, topic_id: topic_id) since_reply = since_reply.where('id > ?', last_action_in_topic) if last_action_in_topic (since_reply.count >= SiteSetting.newuser_max_replies_per_topic) end def delete_all_posts!(guardian) raise Discourse::InvalidAccess unless guardian.can_delete_all_posts? self QueuedPost.where(user_id: id).delete_all posts.order("post_number desc").each do |p| PostDestroyer.new(guardian.user, p).destroy end end def suspended? suspended_till && suspended_till > DateTime.now end def suspend_record UserHistory.for(self, :suspend_user).order('id DESC').first end def suspend_reason suspend_record.try(:details) if suspended? end # Use this helper to determine if the user has a particular trust level. # Takes into account admin, etc. def has_trust_level?(level) raise "Invalid trust level #{level}" unless TrustLevel.valid?(level) admin? || moderator? || TrustLevel.compare(trust_level, level) end # a touch faster than automatic def admin? 
admin end def guardian Guardian.new(self) end def username_format_validator UsernameValidator.perform_validation(self, 'username') end def email_confirmed? email_tokens.where(email: email, confirmed: true).present? || email_tokens.empty? end def activate email_token = self.email_tokens.active.first if email_token EmailToken.confirm(email_token.token) else self.active = true save end end def deactivate self.active = false save end def change_trust_level!(level, opts=nil) Promotion.new(self).change_trust_level!(level, opts) end def treat_as_new_topic_start_date duration = new_topic_duration_minutes || SiteSetting.new_topic_duration_minutes [case duration when User::NewTopicDuration::ALWAYS created_at when User::NewTopicDuration::LAST_VISIT previous_visit_at || user_stat.new_since else duration.minutes.ago end, user_stat.new_since].max end def readable_name return "#{name} (#{username})" if name.present? && name != username username end def badge_count user_badges.select('distinct badge_id').count end def featured_user_badges user_badges .joins(:badge) .order("CASE WHEN badges.id = (SELECT MAX(ub2.badge_id) FROM user_badges ub2 WHERE ub2.badge_id IN (#{Badge.trust_level_badge_ids.join(",")}) AND ub2.user_id = #{self.id}) THEN 1 ELSE 0 END DESC") .order('badges.badge_type_id ASC, badges.grant_count ASC') .includes(:user, :granted_by, badge: :badge_type) .where("user_badges.id in (select min(u2.id) from user_badges u2 where u2.user_id = ? group by u2.badge_id)", id) .limit(3) end def self.count_by_signup_date(start_date, end_date) where('created_at >= ? and created_at <= ?', start_date, end_date).group('date(created_at)').order('date(created_at)').count end def secure_category_ids cats = self.admin? ? 
Category.where(read_restricted: true) : secure_categories.references(:categories) cats.pluck('categories.id').sort end def topic_create_allowed_category_ids Category.topic_create_allowed(self.id).select(:id) end # Flag all posts from a user as spam def flag_linked_posts_as_spam admin = Discourse.system_user topic_links.includes(:post).each do |tl| begin PostAction.act(admin, tl.post, PostActionType.types[:spam], message: I18n.t('flag_reason.spam_hosts')) rescue PostAction::AlreadyActed # If the user has already acted, just ignore it end end end def has_uploaded_avatar uploaded_avatar.present? end def generate_api_key(created_by) if api_key.present? api_key.regenerate!(created_by) api_key else ApiKey.create(user: self, key: SecureRandom.hex(32), created_by: created_by) end end def revoke_api_key ApiKey.where(user_id: self.id).delete_all end def find_email last_sent_email_address || email end def tl3_requirements @lq ||= TrustLevel3Requirements.new(self) end def on_tl3_grace_period? UserHistory.for(self, :auto_trust_level_change) .where('created_at >= ?', SiteSetting.tl3_promotion_min_duration.to_i.days.ago) .where(previous_value: TrustLevel[2].to_s) .where(new_value: TrustLevel[3].to_s) .exists? end def should_be_redirected_to_top redirected_to_top_reason.present? end def redirected_to_top_reason # redirect is enabled return unless SiteSetting.redirect_users_to_top_page # top must be in the top_menu return unless SiteSetting.top_menu =~ /top/i # there should be enough topics return unless SiteSetting.has_enough_topics_to_redirect_to_top if !seen_before? || (trust_level == 0 && !redirected_to_top_yet?) update_last_redirected_to_top! return I18n.t('redirected_to_top_reasons.new_user') elsif last_seen_at < 1.month.ago update_last_redirected_to_top! return I18n.t('redirected_to_top_reasons.not_seen_in_a_month') end # no reason nil end def redirected_to_top_yet? last_redirected_to_top_at.present? end def update_last_redirected_to_top! 
key = "user:#{id}:update_last_redirected_to_top" delay = SiteSetting.active_user_rate_limit_secs # only update last_redirected_to_top_at once every minute return unless $redis.setnx(key, "1") $redis.expire(key, delay) # delay the update Jobs.enqueue_in(delay / 2, :update_top_redirection, user_id: self.id, redirected_at: Time.zone.now) end def refresh_avatar return if @import_mode avatar = user_avatar || create_user_avatar if SiteSetting.automatically_download_gravatars? && !avatar.last_gravatar_download_attempt Jobs.enqueue(:update_gravatar, user_id: self.id, avatar_id: avatar.id) end # mark all the user's quoted posts as "needing a rebake" Post.rebake_all_quoted_posts(self.id) if self.uploaded_avatar_id_changed? end def first_post_created_at user_stat.try(:first_post_created_at) end def associated_accounts result = [] result << "Twitter(#{twitter_user_info.screen_name})" if twitter_user_info result << "Facebook(#{facebook_user_info.username})" if facebook_user_info result << "Google(#{google_user_info.email})" if google_user_info result << "Github(#{github_user_info.screen_name})" if github_user_info user_open_ids.each do |oid| result << "OpenID #{oid.url[0..20]}...(#{oid.email})" end result.empty? ? I18n.t("user.no_accounts_associated") : result.join(", ") end def user_fields return @user_fields if @user_fields user_field_ids = UserField.pluck(:id) if user_field_ids.present? @user_fields = {} user_field_ids.each do |fid| @user_fields[fid.to_s] = custom_fields["user_field_#{fid}"] end end @user_fields end def title=(val) write_attribute(:title, val) if !new_record? 
&& user_profile user_profile.update_column(:badge_granted_title, false) end end def number_of_deleted_posts Post.with_deleted .where(user_id: self.id) .where.not(deleted_at: nil) .count end def number_of_flagged_posts Post.with_deleted .where(user_id: self.id) .where(id: PostAction.where(post_action_type_id: PostActionType.notify_flag_type_ids) .where(disagreed_at: nil) .select(:post_id)) .count end def number_of_flags_given PostAction.where(user_id: self.id) .where(disagreed_at: nil) .where(post_action_type_id: PostActionType.notify_flag_type_ids) .count end def number_of_warnings self.warnings.count end def number_of_suspensions UserHistory.for(self, :suspend_user).count end def create_user_profile UserProfile.create(user_id: id) end def anonymous? SiteSetting.allow_anonymous_posting && trust_level >= 1 && custom_fields["master_id"].to_i > 0 end protected def badge_grant BadgeGranter.queue_badge_grant(Badge::Trigger::UserChange, user: self) end def update_tracked_topics return unless auto_track_topics_after_msecs_changed? TrackedTopicsUpdater.new(id, auto_track_topics_after_msecs).call end def clear_global_notice_if_needed if admin && SiteSetting.has_login_hint SiteSetting.has_login_hint = false SiteSetting.global_notice = "" end end def ensure_in_trust_level_group Group.user_trust_level_change!(id, trust_level) end def automatic_group_membership Group.where(automatic: false) .where("LENGTH(COALESCE(automatic_membership_email_domains, '')) > 0") .each do |group| domains = group.automatic_membership_email_domains.gsub('.', '\.') if self.email =~ Regexp.new("@(#{domains})$", true) group.add(self) rescue ActiveRecord::RecordNotUnique end end end def create_user_stat stat = UserStat.new(new_since: Time.now) stat.user_id = id stat.save! 
end def create_email_token email_tokens.create(email: email) end def create_visit_record!(date, posts_read=0) user_stat.update_column(:days_visited, user_stat.days_visited + 1) user_visits.create!(visited_at: date, posts_read: posts_read) end def ensure_password_is_hashed if @raw_password self.salt = SecureRandom.hex(16) self.password_hash = hash_password(@raw_password, salt) end end def hash_password(password, salt) raise "password is too long" if password.size > User.max_password_length Pbkdf2.hash_password(password, salt, Rails.configuration.pbkdf2_iterations, Rails.configuration.pbkdf2_algorithm) end def add_trust_level # there is a possibility we did not load trust level column, skip it return unless has_attribute? :trust_level self.trust_level ||= SiteSetting.default_trust_level end def update_username_lower self.username_lower = username.downcase end def strip_downcase_email if self.email self.email = self.email.strip self.email = self.email.downcase end end def username_validator username_format_validator || begin lower = username.downcase existing = User.find_by(username_lower: lower) if username_changed? && existing && existing.id != self.id errors.add(:username, I18n.t(:'user.username.unique')) end end end def send_approval_email if SiteSetting.must_approve_users Jobs.enqueue(:user_email, type: :signup_after_approval, user_id: id, email_token: email_tokens.first.token ) end end def set_default_email_digest if has_attribute?(:email_digests) && self.email_digests.nil? if SiteSetting.default_digest_email_frequency.blank? self.email_digests = false else self.email_digests = true self.digest_after_days ||= SiteSetting.default_digest_email_frequency.to_i if has_attribute?(:digest_after_days) end end end def set_default_external_links_in_new_tab if has_attribute?(:external_links_in_new_tab) && self.external_links_in_new_tab.nil? self.external_links_in_new_tab = !SiteSetting.default_external_links_in_new_tab.blank? 
end end # Delete unactivated accounts (without verified email) that are over a week old def self.purge_unactivated to_destroy = User.where(active: false) .joins('INNER JOIN user_stats AS us ON us.user_id = users.id') .where("created_at < ?", SiteSetting.purge_unactivated_users_grace_period_days.days.ago) .where('NOT admin AND NOT moderator') .limit(100) destroyer = UserDestroyer.new(Discourse.system_user) to_destroy.each do |u| begin destroyer.destroy(u, context: I18n.t(:purge_reason)) rescue Discourse::InvalidAccess, UserDestroyer::PostsExistError # if for some reason the user can't be deleted, continue on to the next one end end end private def previous_visit_at_update_required?(timestamp) seen_before? && (last_seen_at < (timestamp - SiteSetting.previous_visit_timeout_hours.hours)) end def update_previous_visit(timestamp) update_visit_record!(timestamp.to_date) if previous_visit_at_update_required?(timestamp) update_column(:previous_visit_at, last_seen_at) end end end # == Schema Information # # Table name: users # # id :integer not null, primary key # username :string(60) not null # created_at :datetime not null # updated_at :datetime not null # name :string(255) # seen_notification_id :integer default(0), not null # last_posted_at :datetime # email :string(256) not null # password_hash :string(64) # salt :string(32) # active :boolean default(FALSE), not null # username_lower :string(60) not null # auth_token :string(32) # last_seen_at :datetime # admin :boolean default(FALSE), not null # last_emailed_at :datetime # email_digests :boolean not null # trust_level :integer not null # email_private_messages :boolean default(TRUE) # email_direct :boolean default(TRUE), not null # approved :boolean default(FALSE), not null # approved_by_id :integer # approved_at :datetime # digest_after_days :integer # previous_visit_at :datetime # suspended_at :datetime # suspended_till :datetime # date_of_birth :date # auto_track_topics_after_msecs :integer # views :integer 
default(0), not null # flag_level :integer default(0), not null # ip_address :inet # new_topic_duration_minutes :integer # external_links_in_new_tab :boolean not null # enable_quoting :boolean default(TRUE), not null # moderator :boolean default(FALSE) # blocked :boolean default(FALSE) # dynamic_favicon :boolean default(FALSE), not null # title :string(255) # uploaded_avatar_id :integer # email_always :boolean default(FALSE), not null # mailing_list_mode :boolean default(FALSE), not null # locale :string(10) # primary_group_id :integer # registration_ip_address :inet # last_redirected_to_top_at :datetime # disable_jump_reply :boolean default(FALSE), not null # edit_history_public :boolean default(FALSE), not null # trust_level_locked :boolean default(FALSE), not null # # Indexes # # index_users_on_auth_token (auth_token) # index_users_on_last_posted_at (last_posted_at) # index_users_on_last_seen_at (last_seen_at) # index_users_on_username (username) UNIQUE # index_users_on_username_lower (username_lower) UNIQUE #
hxx/discourse
app/models/user.rb
Ruby
gpl-2.0
29,999
/* * Copyright (C) 2013-2017 Oracle Corporation * This file is based on ast_mode.c * Copyright 2012 Red Hat Inc. * Parts based on xf86-video-ast * Copyright (c) 2005 ASPEED Technology Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * */ /* * Authors: Dave Airlie <airlied@redhat.com> * Michael Thayer <michael.thayer@oracle.com, * Hans de Goede <hdegoede@redhat.com> */ #include <linux/export.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_plane_helper.h> #include "vbox_drv.h" #include "vboxvideo.h" #include "hgsmi_channels.h" static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, u32 handle, u32 width, u32 height, s32 hot_x, s32 hot_y); static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y); /** * Set a graphics mode. Poke any required values into registers, do an HGSMI * mode set and tell the host we support advanced graphics functions. 
*/ static void vbox_do_modeset(struct drm_crtc *crtc, const struct drm_display_mode *mode) { struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); struct vbox_private *vbox; int width, height, bpp, pitch; u16 flags; s32 x_offset, y_offset; vbox = crtc->dev->dev_private; width = mode->hdisplay ? mode->hdisplay : 640; height = mode->vdisplay ? mode->vdisplay : 480; bpp = crtc->enabled ? CRTC_FB(crtc)->format->cpp[0] * 8 : 32; pitch = crtc->enabled ? CRTC_FB(crtc)->pitches[0] : width * bpp / 8; x_offset = vbox->single_framebuffer ? crtc->x : vbox_crtc->x_hint; y_offset = vbox->single_framebuffer ? crtc->y : vbox_crtc->y_hint; /* * This is the old way of setting graphics modes. It assumed one screen * and a frame-buffer at the start of video RAM. On older versions of * VirtualBox, certain parts of the code still assume that the first * screen is programmed this way, so try to fake it. */ if (vbox_crtc->crtc_id == 0 && crtc->enabled && vbox_crtc->fb_offset / pitch < 0xffff - crtc->y && vbox_crtc->fb_offset % (bpp / 8) == 0) { vbox_write_ioport(VBE_DISPI_INDEX_XRES, width); vbox_write_ioport(VBE_DISPI_INDEX_YRES, height); vbox_write_ioport(VBE_DISPI_INDEX_VIRT_WIDTH, pitch * 8 / bpp); vbox_write_ioport(VBE_DISPI_INDEX_BPP, CRTC_FB(crtc)->format->cpp[0] * 8); vbox_write_ioport(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED); vbox_write_ioport( VBE_DISPI_INDEX_X_OFFSET, vbox_crtc->fb_offset % pitch / bpp * 8 + crtc->x); vbox_write_ioport(VBE_DISPI_INDEX_Y_OFFSET, vbox_crtc->fb_offset / pitch + crtc->y); } flags = VBVA_SCREEN_F_ACTIVE; flags |= (crtc->enabled && !vbox_crtc->blanked) ? 0 : VBVA_SCREEN_F_BLANK; flags |= vbox_crtc->disconnected ? VBVA_SCREEN_F_DISABLED : 0; hgsmi_process_display_info(vbox->guest_pool, vbox_crtc->crtc_id, x_offset, y_offset, crtc->x * bpp / 8 + crtc->y * pitch, pitch, width, height, vbox_crtc->blanked ? 
0 : bpp, flags); } static int vbox_set_view(struct drm_crtc *crtc) { struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); struct vbox_private *vbox = crtc->dev->dev_private; struct vbva_infoview *p; /* * Tell the host about the view. This design originally targeted the * Windows XP driver architecture and assumed that each screen would * have a dedicated frame buffer with the command buffer following it, * the whole being a "view". The host works out which screen a command * buffer belongs to by checking whether it is in the first view, then * whether it is in the second and so on. The first match wins. We * cheat around this by making the first view be the managed memory * plus the first command buffer, the second the same plus the second * buffer and so on. */ p = hgsmi_buffer_alloc(vbox->guest_pool, sizeof(*p), HGSMI_CH_VBVA, VBVA_INFO_VIEW); if (!p) return -ENOMEM; p->view_index = vbox_crtc->crtc_id; p->view_offset = vbox_crtc->fb_offset; p->view_size = vbox->available_vram_size - vbox_crtc->fb_offset + vbox_crtc->crtc_id * VBVA_MIN_BUFFER_SIZE; p->max_screen_size = vbox->available_vram_size - vbox_crtc->fb_offset; hgsmi_buffer_submit(vbox->guest_pool, p); hgsmi_buffer_free(vbox->guest_pool, p); return 0; } static void vbox_crtc_dpms(struct drm_crtc *crtc, int mode) { struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); struct vbox_private *vbox = crtc->dev->dev_private; switch (mode) { case DRM_MODE_DPMS_ON: vbox_crtc->blanked = false; break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: vbox_crtc->blanked = true; break; } mutex_lock(&vbox->hw_mutex); vbox_do_modeset(crtc, &crtc->hwmode); mutex_unlock(&vbox->hw_mutex); } static bool vbox_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; } /* * Try to map the layout of virtual screens to the range of the input device. * Return true if we need to re-set the crtc modes due to screen offset * changes. 
*/ static bool vbox_set_up_input_mapping(struct vbox_private *vbox) { struct drm_crtc *crtci; struct drm_connector *connectori; struct drm_framebuffer *fb1 = NULL; bool single_framebuffer = true; bool old_single_framebuffer = vbox->single_framebuffer; u16 width = 0, height = 0; /* * Are we using an X.Org-style single large frame-buffer for all crtcs? * If so then screen layout can be deduced from the crtc offsets. * Same fall-back if this is the fbdev frame-buffer. */ list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head) { if (!fb1) { fb1 = CRTC_FB(crtci); if (to_vbox_framebuffer(fb1) == &vbox->fbdev->afb) break; } else if (CRTC_FB(crtci) && fb1 != CRTC_FB(crtci)) { single_framebuffer = false; } } if (single_framebuffer) { list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head) { if (to_vbox_crtc(crtci)->crtc_id != 0) continue; vbox->single_framebuffer = true; vbox->input_mapping_width = CRTC_FB(crtci)->width; vbox->input_mapping_height = CRTC_FB(crtci)->height; return old_single_framebuffer != vbox->single_framebuffer; } } /* Otherwise calculate the total span of all screens. */ list_for_each_entry(connectori, &vbox->dev->mode_config.connector_list, head) { struct vbox_connector *vbox_connector = to_vbox_connector(connectori); struct vbox_crtc *vbox_crtc = vbox_connector->vbox_crtc; width = max_t(u16, width, vbox_crtc->x_hint + vbox_connector->mode_hint.width); height = max_t(u16, height, vbox_crtc->y_hint + vbox_connector->mode_hint.height); } vbox->single_framebuffer = false; vbox->input_mapping_width = width; vbox->input_mapping_height = height; return old_single_framebuffer != vbox->single_framebuffer; } static int vbox_crtc_do_set_base(struct drm_crtc *crtc, struct drm_framebuffer *old_fb, int x, int y) { struct vbox_private *vbox = crtc->dev->dev_private; struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); struct drm_gem_object *obj; struct vbox_framebuffer *vbox_fb; struct vbox_bo *bo; int ret; u64 gpu_addr; /* Unpin the previous fb. 
*/ if (old_fb) { vbox_fb = to_vbox_framebuffer(old_fb); obj = vbox_fb->obj; bo = gem_to_vbox_bo(obj); ret = vbox_bo_reserve(bo, false); if (ret) return ret; vbox_bo_unpin(bo); vbox_bo_unreserve(bo); } vbox_fb = to_vbox_framebuffer(CRTC_FB(crtc)); obj = vbox_fb->obj; bo = gem_to_vbox_bo(obj); ret = vbox_bo_reserve(bo, false); if (ret) return ret; ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr); if (ret) { vbox_bo_unreserve(bo); return ret; } if (&vbox->fbdev->afb == vbox_fb) vbox_fbdev_set_base(vbox, gpu_addr); vbox_bo_unreserve(bo); /* vbox_set_start_address_crt1(crtc, (u32)gpu_addr); */ vbox_crtc->fb_offset = gpu_addr; if (vbox_set_up_input_mapping(vbox)) { struct drm_crtc *crtci; list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head) { vbox_set_view(crtc); vbox_do_modeset(crtci, &crtci->mode); } } return 0; } static int vbox_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { return vbox_crtc_do_set_base(crtc, old_fb, x, y); } static int vbox_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb) { struct vbox_private *vbox = crtc->dev->dev_private; int ret; vbox_crtc_mode_set_base(crtc, x, y, old_fb); mutex_lock(&vbox->hw_mutex); ret = vbox_set_view(crtc); if (!ret) vbox_do_modeset(crtc, mode); hgsmi_update_input_mapping(vbox->guest_pool, 0, 0, vbox->input_mapping_width, vbox->input_mapping_height); mutex_unlock(&vbox->hw_mutex); return ret; } static void vbox_crtc_disable(struct drm_crtc *crtc) { } static void vbox_crtc_prepare(struct drm_crtc *crtc) { } static void vbox_crtc_commit(struct drm_crtc *crtc) { } static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = { .dpms = vbox_crtc_dpms, .mode_fixup = vbox_crtc_mode_fixup, .mode_set = vbox_crtc_mode_set, /* .mode_set_base = vbox_crtc_mode_set_base, */ .disable = vbox_crtc_disable, .prepare = vbox_crtc_prepare, .commit = vbox_crtc_commit, }; static 
void vbox_crtc_reset(struct drm_crtc *crtc) { } static void vbox_crtc_destroy(struct drm_crtc *crtc) { drm_crtc_cleanup(crtc); kfree(crtc); } static const struct drm_crtc_funcs vbox_crtc_funcs = { .cursor_move = vbox_cursor_move, .cursor_set2 = vbox_cursor_set2, .reset = vbox_crtc_reset, .set_config = drm_crtc_helper_set_config, /* .gamma_set = vbox_crtc_gamma_set, */ .destroy = vbox_crtc_destroy, }; static struct vbox_crtc *vbox_crtc_init(struct drm_device *dev, unsigned int i) { struct vbox_crtc *vbox_crtc; vbox_crtc = kzalloc(sizeof(*vbox_crtc), GFP_KERNEL); if (!vbox_crtc) return NULL; vbox_crtc->crtc_id = i; drm_crtc_init(dev, &vbox_crtc->base, &vbox_crtc_funcs); drm_mode_crtc_set_gamma_size(&vbox_crtc->base, 256); drm_crtc_helper_add(&vbox_crtc->base, &vbox_crtc_helper_funcs); return vbox_crtc; } static void vbox_encoder_destroy(struct drm_encoder *encoder) { drm_encoder_cleanup(encoder); kfree(encoder); } static struct drm_encoder *vbox_best_single_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; /* pick the encoder ids */ if (enc_id) return drm_encoder_find(connector->dev, NULL, enc_id); return NULL; } static const struct drm_encoder_funcs vbox_enc_funcs = { .destroy = vbox_encoder_destroy, }; static void vbox_encoder_dpms(struct drm_encoder *encoder, int mode) { } static bool vbox_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { return true; } static void vbox_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { } static void vbox_encoder_prepare(struct drm_encoder *encoder) { } static void vbox_encoder_commit(struct drm_encoder *encoder) { } static const struct drm_encoder_helper_funcs vbox_enc_helper_funcs = { .dpms = vbox_encoder_dpms, .mode_fixup = vbox_mode_fixup, .prepare = vbox_encoder_prepare, .commit = vbox_encoder_commit, .mode_set = vbox_encoder_mode_set, }; static struct 
drm_encoder *vbox_encoder_init(struct drm_device *dev, unsigned int i) { struct vbox_encoder *vbox_encoder; vbox_encoder = kzalloc(sizeof(*vbox_encoder), GFP_KERNEL); if (!vbox_encoder) return NULL; drm_encoder_init(dev, &vbox_encoder->base, &vbox_enc_funcs, DRM_MODE_ENCODER_DAC, NULL); drm_encoder_helper_add(&vbox_encoder->base, &vbox_enc_helper_funcs); vbox_encoder->base.possible_crtcs = 1 << i; return &vbox_encoder->base; } /** * Generate EDID data with a mode-unique serial number for the virtual * monitor to try to persuade Unity that different modes correspond to * different monitors and it should not try to force the same resolution on * them. */ static void vbox_set_edid(struct drm_connector *connector, int width, int height) { enum { EDID_SIZE = 128 }; unsigned char edid[EDID_SIZE] = { 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, /* header */ 0x58, 0x58, /* manufacturer (VBX) */ 0x00, 0x00, /* product code */ 0x00, 0x00, 0x00, 0x00, /* serial number goes here */ 0x01, /* week of manufacture */ 0x00, /* year of manufacture */ 0x01, 0x03, /* EDID version */ 0x80, /* capabilities - digital */ 0x00, /* horiz. res in cm, zero for projectors */ 0x00, /* vert. res in cm */ 0x78, /* display gamma (120 == 2.2). */ 0xEE, /* features (standby, suspend, off, RGB, std */ /* colour space, preferred timing mode) */ 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, 0x50, 0x54, /* chromaticity for standard colour space. 
*/ 0x00, 0x00, 0x00, /* no default timings */ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, /* no standard timings */ 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x06, 0x00, 0x02, 0x02, 0x02, 0x02, /* descriptor block 1 goes below */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* descriptor block 2, monitor ranges */ 0x00, 0x00, 0x00, 0xFD, 0x00, 0x00, 0xC8, 0x00, 0xC8, 0x64, 0x00, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, /* 0-200Hz vertical, 0-200KHz horizontal, 1000MHz pixel clock */ 0x20, /* descriptor block 3, monitor name */ 0x00, 0x00, 0x00, 0xFC, 0x00, 'V', 'B', 'O', 'X', ' ', 'm', 'o', 'n', 'i', 't', 'o', 'r', '\n', /* descriptor block 4: dummy data */ 0x00, 0x00, 0x00, 0x10, 0x00, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, /* number of extensions */ 0x00 /* checksum goes here */ }; int clock = (width + 6) * (height + 6) * 60 / 10000; unsigned int i, sum = 0; edid[12] = width & 0xff; edid[13] = width >> 8; edid[14] = height & 0xff; edid[15] = height >> 8; edid[54] = clock & 0xff; edid[55] = clock >> 8; edid[56] = width & 0xff; edid[58] = (width >> 4) & 0xf0; edid[59] = height & 0xff; edid[61] = (height >> 4) & 0xf0; for (i = 0; i < EDID_SIZE - 1; ++i) sum += edid[i]; edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF; drm_connector_update_edid_property(connector, (struct edid *)edid); } static int vbox_get_modes(struct drm_connector *connector) { struct vbox_connector *vbox_connector = NULL; struct drm_display_mode *mode = NULL; struct vbox_private *vbox = NULL; unsigned int num_modes = 0; int preferred_width, preferred_height; vbox_connector = to_vbox_connector(connector); vbox = connector->dev->dev_private; /* * Heuristic: we do not want to tell the host that we support dynamic * resizing unless we feel confident that the user space client using * the video driver can handle hot-plug events. 
So the first time modes * are queried after a "master" switch we tell the host that we do not, * and immediately after we send the client a hot-plug notification as * a test to see if they will respond and query again. * That is also the reason why capabilities are reported to the host at * this place in the code rather than elsewhere. * We need to report the flags location before reporting the IRQ * capability. */ hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) + HOST_FLAGS_OFFSET); if (vbox_connector->vbox_crtc->crtc_id == 0) vbox_report_caps(vbox); if (!vbox->initial_mode_queried) { if (vbox_connector->vbox_crtc->crtc_id == 0) { vbox->initial_mode_queried = true; vbox_report_hotplug(vbox); } return drm_add_modes_noedid(connector, 800, 600); } num_modes = drm_add_modes_noedid(connector, 2560, 1600); preferred_width = vbox_connector->mode_hint.width ? vbox_connector->mode_hint.width : 1024; preferred_height = vbox_connector->mode_hint.height ? vbox_connector->mode_hint.height : 768; mode = drm_cvt_mode(connector->dev, preferred_width, preferred_height, 60, false, false, false); if (mode) { mode->type |= DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(connector, mode); ++num_modes; } vbox_set_edid(connector, preferred_width, preferred_height); if (vbox_connector->vbox_crtc->x_hint != -1) drm_object_property_set_value(&connector->base, vbox->dev->mode_config.suggested_x_property, vbox_connector->vbox_crtc->x_hint); else drm_object_property_set_value(&connector->base, vbox->dev->mode_config.suggested_x_property, 0); if (vbox_connector->vbox_crtc->y_hint != -1) drm_object_property_set_value(&connector->base, vbox->dev->mode_config.suggested_y_property, vbox_connector->vbox_crtc->y_hint); else drm_object_property_set_value(&connector->base, vbox->dev->mode_config.suggested_y_property, 0); return num_modes; } static enum drm_mode_status vbox_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { return MODE_OK; } static void 
vbox_connector_destroy(struct drm_connector *connector) { drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); } static enum drm_connector_status vbox_connector_detect(struct drm_connector *connector, bool force) { struct vbox_connector *vbox_connector; vbox_connector = to_vbox_connector(connector); return vbox_connector->mode_hint.disconnected ? connector_status_disconnected : connector_status_connected; } static int vbox_fill_modes(struct drm_connector *connector, u32 max_x, u32 max_y) { struct vbox_connector *vbox_connector; struct drm_device *dev; struct drm_display_mode *mode, *iterator; vbox_connector = to_vbox_connector(connector); dev = vbox_connector->base.dev; list_for_each_entry_safe(mode, iterator, &connector->modes, head) { list_del(&mode->head); drm_mode_destroy(dev, mode); } return drm_helper_probe_single_connector_modes(connector, max_x, max_y); } static const struct drm_connector_helper_funcs vbox_connector_helper_funcs = { .mode_valid = vbox_mode_valid, .get_modes = vbox_get_modes, .best_encoder = vbox_best_single_encoder, }; static const struct drm_connector_funcs vbox_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = vbox_connector_detect, .fill_modes = vbox_fill_modes, .destroy = vbox_connector_destroy, }; static int vbox_connector_init(struct drm_device *dev, struct vbox_crtc *vbox_crtc, struct drm_encoder *encoder) { struct vbox_connector *vbox_connector; struct drm_connector *connector; vbox_connector = kzalloc(sizeof(*vbox_connector), GFP_KERNEL); if (!vbox_connector) return -ENOMEM; connector = &vbox_connector->base; vbox_connector->vbox_crtc = vbox_crtc; drm_connector_init(dev, connector, &vbox_connector_funcs, DRM_MODE_CONNECTOR_VGA); drm_connector_helper_add(connector, &vbox_connector_helper_funcs); connector->interlace_allowed = 0; connector->doublescan_allowed = 0; drm_mode_create_suggested_offset_properties(dev); drm_object_attach_property(&connector->base, 
dev->mode_config.suggested_x_property, 0); drm_object_attach_property(&connector->base, dev->mode_config.suggested_y_property, 0); drm_connector_register(connector); drm_connector_attach_encoder(connector, encoder); return 0; } int vbox_mode_init(struct drm_device *dev) { struct vbox_private *vbox = dev->dev_private; struct drm_encoder *encoder; struct vbox_crtc *vbox_crtc; unsigned int i; int ret; /* vbox_cursor_init(dev); */ for (i = 0; i < vbox->num_crtcs; ++i) { vbox_crtc = vbox_crtc_init(dev, i); if (!vbox_crtc) return -ENOMEM; encoder = vbox_encoder_init(dev, i); if (!encoder) return -ENOMEM; ret = vbox_connector_init(dev, vbox_crtc, encoder); if (ret) return ret; } return 0; } void vbox_mode_fini(struct drm_device *dev) { /* vbox_cursor_fini(dev); */ } /** * Copy the ARGB image and generate the mask, which is needed in case the host * does not support ARGB cursors. The mask is a 1BPP bitmap with the bit set * if the corresponding alpha value in the ARGB image is greater than 0xF0. */ static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height, size_t mask_size) { size_t line_size = (width + 7) / 8; u32 i, j; memcpy(dst + mask_size, src, width * height * 4); for (i = 0; i < height; ++i) for (j = 0; j < width; ++j) if (((u32 *)src)[i * width + j] > 0xf0000000) dst[i * line_size + j / 8] |= (0x80 >> (j % 8)); } static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, u32 handle, u32 width, u32 height, s32 hot_x, s32 hot_y) { struct vbox_private *vbox = crtc->dev->dev_private; struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); struct ttm_bo_kmap_obj uobj_map; size_t data_size, mask_size; struct drm_gem_object *obj; u32 flags, caps = 0; struct vbox_bo *bo; bool src_isiomem; u8 *dst = NULL; u8 *src; int ret; /* * Re-set this regularly as in 5.0.20 and earlier the information was * lost on save and restore. 
*/ hgsmi_update_input_mapping(vbox->guest_pool, 0, 0, vbox->input_mapping_width, vbox->input_mapping_height); if (!handle) { bool cursor_enabled = false; struct drm_crtc *crtci; /* Hide cursor. */ vbox_crtc->cursor_enabled = false; list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head) { if (to_vbox_crtc(crtci)->cursor_enabled) cursor_enabled = true; } if (!cursor_enabled) hgsmi_update_pointer_shape(vbox->guest_pool, 0, 0, 0, 0, 0, NULL, 0); return 0; } vbox_crtc->cursor_enabled = true; if (width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT || width == 0 || height == 0) return -EINVAL; ret = hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps); if (ret) return ret; if (!(caps & VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE)) { /* * -EINVAL means cursor_set2() not supported, -EAGAIN means * retry at once. */ return -EBUSY; } obj = drm_gem_object_lookup(file_priv, handle); if (!obj) { DRM_ERROR("Cannot find cursor object %x for crtc\n", handle); return -ENOENT; } bo = gem_to_vbox_bo(obj); ret = vbox_bo_reserve(bo, false); if (ret) goto out_unref_obj; /* * The mask must be calculated based on the alpha * channel, one bit per ARGB word, and must be 32-bit * padded. 
*/ mask_size = ((width + 7) / 8 * height + 3) & ~3; data_size = width * height * 4 + mask_size; vbox->cursor_hot_x = min_t(u32, max(hot_x, 0), width); vbox->cursor_hot_y = min_t(u32, max(hot_y, 0), height); vbox->cursor_width = width; vbox->cursor_height = height; vbox->cursor_data_size = data_size; dst = vbox->cursor_data; ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map); if (ret) { vbox->cursor_data_size = 0; goto out_unreserve_bo; } src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem); if (src_isiomem) { DRM_ERROR("src cursor bo not in main memory\n"); ret = -EIO; goto out_unmap_bo; } copy_cursor_image(src, dst, width, height, mask_size); flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA; ret = hgsmi_update_pointer_shape(vbox->guest_pool, flags, vbox->cursor_hot_x, vbox->cursor_hot_y, width, height, dst, data_size); out_unmap_bo: ttm_bo_kunmap(&uobj_map); out_unreserve_bo: vbox_bo_unreserve(bo); out_unref_obj: drm_gem_object_put_unlocked(obj); return ret; } static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y) { struct vbox_private *vbox = crtc->dev->dev_private; u32 flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA; s32 crtc_x = vbox->single_framebuffer ? crtc->x : to_vbox_crtc(crtc)->x_hint; s32 crtc_y = vbox->single_framebuffer ? crtc->y : to_vbox_crtc(crtc)->y_hint; u32 host_x, host_y; u32 hot_x = 0; u32 hot_y = 0; int ret; /* * We compare these to unsigned later and don't * need to handle negative. */ if (x + crtc_x < 0 || y + crtc_y < 0 || vbox->cursor_data_size == 0) return 0; ret = hgsmi_cursor_position(vbox->guest_pool, true, x + crtc_x, y + crtc_y, &host_x, &host_y); /* * The only reason we have vbox_cursor_move() is that some older clients * might use DRM_IOCTL_MODE_CURSOR instead of DRM_IOCTL_MODE_CURSOR2 and * use DRM_MODE_CURSOR_MOVE to set the hot-spot. 
* * However VirtualBox 5.0.20 and earlier has a bug causing it to return * 0,0 as host cursor location after a save and restore. * * To work around this we ignore a 0, 0 return, since missing the odd * time when it legitimately happens is not going to hurt much. */ if (ret || (host_x == 0 && host_y == 0)) return ret; if (x + crtc_x < host_x) hot_x = min(host_x - x - crtc_x, vbox->cursor_width); if (y + crtc_y < host_y) hot_y = min(host_y - y - crtc_y, vbox->cursor_height); if (hot_x == vbox->cursor_hot_x && hot_y == vbox->cursor_hot_y) return 0; vbox->cursor_hot_x = hot_x; vbox->cursor_hot_y = hot_y; return hgsmi_update_pointer_shape(vbox->guest_pool, flags, hot_x, hot_y, vbox->cursor_width, vbox->cursor_height, vbox->cursor_data, vbox->cursor_data_size); }
kronat/linux
drivers/staging/vboxvideo/vbox_mode.c
C
gpl-2.0
26,301
/* * include/linux/mfd/wm8994/pdata.h -- Platform data for WM8994 * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #ifndef __MFD_WM8994_PDATA_H__ #define __MFD_WM8994_PDATA_H__ #define WM8994_NUM_LDO 2 #define WM8994_NUM_GPIO 11 struct wm8994_ldo_pdata { /** GPIOs to enable regulator, 0 or less if not available */ int enable; const struct regulator_init_data *init_data; }; #define WM8994_CONFIGURE_GPIO 0x10000 #define WM8994_NUM_AIF 2 #define WM8994_DRC_REGS 5 #define WM8994_EQ_REGS 21 #define WM8958_MBC_CUTOFF_REGS 20 #define WM8958_MBC_COEFF_REGS 48 #define WM8958_MBC_COMBINED_REGS 56 #define WM8958_VSS_HPF_REGS 2 #define WM8958_VSS_REGS 148 #define WM8958_ENH_EQ_REGS 32 /** * DRC configurations are specified with a label and a set of register * values to write (the enable bits will be ignored). At runtime an * enumerated control will be presented for each DRC block allowing * the user to choose the configration to use. * * Configurations may be generated by hand or by using the DRC control * panel provided by the WISCE - see http://www.wolfsonmicro.com/wisce/ * for details. */ struct wm8994_drc_cfg { const char *name; u16 regs[WM8994_DRC_REGS]; }; /** * ReTune Mobile configurations are specified with a label, sample * rate and set of values to write (the enable bits will be ignored). * * Configurations are expected to be generated using the ReTune Mobile * control panel in WISCE - see http://www.wolfsonmicro.com/wisce/ */ struct wm8994_retune_mobile_cfg { const char *name; unsigned int rate; u16 regs[WM8994_EQ_REGS]; }; /** * Multiband compressor configurations are specified with a label and * two sets of values to write. 
Configurations are expected to be * generated using the multiband compressor configuration panel in * WISCE - see http://www.wolfsonmicro.com/wisce/ */ struct wm8958_mbc_cfg { const char *name; u16 cutoff_regs[WM8958_MBC_CUTOFF_REGS]; u16 coeff_regs[WM8958_MBC_COEFF_REGS]; /* Coefficient layout when using MBC+VSS firmware */ u16 combined_regs[WM8958_MBC_COMBINED_REGS]; }; /** * VSS HPF configurations are specified with a label and two values to * write. Configurations are expected to be generated using the * multiband compressor configuration panel in WISCE - see * http://www.wolfsonmicro.com/wisce/ */ struct wm8958_vss_hpf_cfg { const char *name; u16 regs[WM8958_VSS_HPF_REGS]; }; /** * VSS configurations are specified with a label and array of values * to write. Configurations are expected to be generated using the * multiband compressor configuration panel in WISCE - see * http://www.wolfsonmicro.com/wisce/ */ struct wm8958_vss_cfg { const char *name; u16 regs[WM8958_VSS_REGS]; }; /** * Enhanced EQ configurations are specified with a label and array of * values to write. Configurations are expected to be generated using * the multiband compressor configuration panel in WISCE - see * http://www.wolfsonmicro.com/wisce/ */ struct wm8958_enh_eq_cfg { const char *name; u16 regs[WM8958_ENH_EQ_REGS]; }; /** * Microphone detection rates, used to tune response rates and power * consumption for WM8958/WM1811 microphone detection. * * @sysclk: System clock rate to use this configuration for. * @idle: True if this configuration should use when no accessory is detected, * false otherwise. * @start: Value for MICD_BIAS_START_TIME register field (not shifted). * @rate: Value for MICD_RATE register field (not shifted). */ struct wm8958_micd_rate { int sysclk; bool idle; int start; int rate; }; struct wm8994_pdata { int gpio_base; /** * Default values for GPIOs if non-zero, WM8994_CONFIGURE_GPIO * can be used for all zero values. 
*/ int gpio_defaults[WM8994_NUM_GPIO]; struct wm8994_ldo_pdata ldo[WM8994_NUM_LDO]; int irq_base; /** Base IRQ number for WM8994, required for IRQs */ unsigned long irq_flags; /** user irq flags */ int num_drc_cfgs; struct wm8994_drc_cfg *drc_cfgs; int num_retune_mobile_cfgs; struct wm8994_retune_mobile_cfg *retune_mobile_cfgs; int num_mbc_cfgs; struct wm8958_mbc_cfg *mbc_cfgs; int num_vss_cfgs; struct wm8958_vss_cfg *vss_cfgs; int num_vss_hpf_cfgs; struct wm8958_vss_hpf_cfg *vss_hpf_cfgs; int num_enh_eq_cfgs; struct wm8958_enh_eq_cfg *enh_eq_cfgs; int num_micd_rates; struct wm8958_micd_rate *micd_rates; /* LINEOUT can be differential or single ended */ unsigned int lineout1_diff:1; unsigned int lineout2_diff:1; /* Common mode feedback */ unsigned int lineout1fb:1; unsigned int lineout2fb:1; /* Delay between detecting a jack and starting microphone * detect (specified in ms) */ int micdet_delay; /* Delay between microphone detect completing and reporting on * insert (specified in ms) */ int mic_id_delay; /* IRQ for microphone detection if brought out directly as a * signal. */ int micdet_irq; /* WM8994 microphone biases: 0=0.9*AVDD1 1=0.65*AVVD1 */ unsigned int micbias1_lvl:1; unsigned int micbias2_lvl:1; /* WM8994 jack detect threashold levels, see datasheet for values */ unsigned int jd_scthr:2; unsigned int jd_thr:2; /* Configure WM1811 jack detection for use with external capacitor */ unsigned int jd_ext_cap:1; /* WM8958 microphone bias configuration */ int micbias[2]; /* WM8958 microphone detection ranges */ u16 micd_lvl_sel; /* Disable the internal pull downs on the LDOs if they are * always driven (eg, connected to an always on supply or * GPIO that always drives an output. If they float power * consumption will rise. */ bool ldo_ena_always_driven; /* * SPKMODE must be pulled internally by the device on this * system. */ bool spkmode_pu; /** * Override the params for an AIF */ int override_rates[WM8994_NUM_AIF]; }; #endif
cool104/kernel-zenfone-4
include/linux/mfd/wm8994/pdata.h
C
gpl-2.0
6,261
<?php if (file_exists(__DIR__.'/../var/cache/dev/App_KernelDevDebugContainer.preload.php')) { require __DIR__.'/../var/cache/dev/App_KernelDevDebugContainer.preload.php'; }
BeWelcome/rox
src/.preload.php
PHP
gpl-2.0
178
<?php /** * The singleton class that contains all functionality regarding the "override WordPress comments" functionality. * * @package Muut * @copyright 2014 Muut Inc */ // Don't load directly if ( !defined( 'ABSPATH' ) ) { die( '-1' ); } if ( !class_exists( 'Muut_Comment_Overrides' ) ) { /** * Muut Comment Overrides class. * * @package Muut * @author Paul Hughes * @since 3.0 */ class Muut_Comment_Overrides { /** * @static * @property Muut_Comment_Overrides The instance of the class. */ protected static $instance; /** * The singleton method. * * @return Muut_Comment_Overrides The instance. * @author Paul Hughes * @since 3.0 */ public static function instance() { if ( !is_a( self::$instance, __CLASS__ ) ) { self::$instance = new self(); } return self::$instance; } /** * The class constructor. * * @return Muut_Comment_Overrides * @author Paul Hughes * @since 3.0 */ protected function __construct() { $this->addActions(); $this->addFilters(); } /** * Adds the actions used by this class. * * @return void * @author Paul Hughes * @since 3.0 */ protected function addActions() { add_action( 'shutdown', array( $this, 'flushPostCommentCount' ), 10 ); } /** * Adds the filters used by this class. * * @return void * @author Paul Hughes * @since 3.0 */ protected function addFilters() { add_filter( 'comments_template', array( $this, 'commentsTemplate' ) ); add_filter( 'get_comments_link', array( $this, 'commentsLink' ), 10, 2 ); add_filter( 'get_comments_number', array( $this, 'muutCommentsNumber' ), 10, 2 ); add_filter( 'wp_head', array( $this, 'fetchCommentCountForMuutPosts' ) ); } /** * Gets the post's comments path. * * @param int $post_id The ID of the post we are fetching the Muut comments path for. * @param bool $full_path Whether to retrieve the full path, including the root forum. * @return string The post's Muut comments path. 
* @author Paul Hughes * @since 3.0 */ public function getCommentsPath( $post_id, $full_path = false ) { if ( !is_numeric( $post_id) ) { return false; } $domain = get_post_meta( $post_id, 'muut_post_domain', true ); $post_commenting_options = Muut_Post_Utility::getPostOption( $post_id, 'commenting_settings' ); $default_type = apply_filters( 'muut_default_commenting_type', 'flat' ); $commenting_type = isset( $post_commenting_options['type'] ) ? $post_commenting_options['type'] : $default_type; if ( $domain == '' ) { // Assign the domain name to the post for permanent reference. $domain = muut()->getOption( 'comments_base_domain' ); update_post_meta( $post_id, 'muut_post_domain', apply_filters( 'muut_post_comments_domain', $domain, $post_id ) ); } $post = get_post( $post_id ); $update_timestamps = muut()->getOption( 'update_timestamps', array() ); if ( isset( $update_timestamps['3.0'] ) && get_post_time( 'U', false, $post ) < $update_timestamps['3.0'] ) { $path = $domain . ':' . sanitize_title( $post->post_title ); } else { $path = $domain . '/' . $post_id; if ( $commenting_type == 'flat' ) { $path .= ':comments'; } } $path = apply_filters( 'muut_comments_path_for_post', $path, $post_id, $commenting_type ); if ( !$full_path ) { return $path; } else { return muut()->getForumName() . '/' . $path; } } /** * Gets a comment section's full index URI. * * @param int $post_id The post whose comment section remote URI we are fetching. * @return string The full index URI. * @author Paul Hughes * @since 3.0.1 */ public function getCommentsIndexUri( $post_id ) { if( !is_numeric( $post_id ) ) { return false; } $base_uri = muut()->getForumIndexUri(); $uri = $base_uri . $this->getCommentsPath( $post_id, false ); return apply_filters( 'muut_comments_index_uri', $uri, $post_id ); } /** * Gets the proper comments template when overrides are on. * * @param string $template The current comments template being fetched. * @return string The modified template to fetch. 
* @author Paul Hughes * @since 3.0 */ public function commentsTemplate( $template ) { global $post; $disabled_post_types = apply_filters( 'muut_disabled_comment_override_post_types', array() ); if ( muut()->getForumName() != '' && muut()->getOption( 'replace_comments', false ) && !in_array( $post->post_type, $disabled_post_types ) ) { if ( Muut_Post_Utility::isMuutCommentingPost( $post->ID ) ) { // TODO: Make it so it checks if the post type is supposed to be overridden. $template = Muut_Template_Loader::instance()->locateTemplate( 'comments.php' ); } } return $template; } /** * Gets the comments anchor for a given post. * * @param int $post_id The ID of the post we are getting the comments anchor for. * @param bool $echo Whether to echo the value. * @return string|void The markup for the Muut comments embed or void if echoed. * @author Paul Hughes * @since 3.0 */ public function commentsOverrideAnchor( $post_id, $echo = true ) { if ( !is_numeric( $post_id ) ) { return false; } $path = $this->getCommentsPath( $post_id, true ); $post_type = get_post_type_object( get_post_type( $post_id ) ); $post_type_name = $post_type->labels->singular_name; $post_commenting_options = Muut_Post_Utility::getPostOption( $post_id, 'commenting_settings' ); $settings = 'data-show_online="false" '; if ( isset( $post_commenting_options['type'] ) && $post_commenting_options['type'] == 'threaded' ) { $post_title = substr( sprintf( __( 'Comments on %s', 'muut' ), get_the_title( $post_id ) ), 0, 82 ); $settings .= 'data-show_title="false" title="' . $post_title . '" data-channel="' . $post_title . '"'; } else { $post_title = substr( $post_type_name . ': ' . get_the_title( $post_id ), 0, 82 ); $settings .= 'data-show_title="true" title="' . $post_title . '" data-channel="' . __( 'Comments', 'muut' ) . 
'" '; } if ( isset( $post_commenting_options['disable_uploads'] ) && $post_commenting_options['disable_uploads'] == '1' ) { $settings .= 'data-upload="false" '; } else { $settings .= 'data-upload="true" '; } if ( !$path ) return false; $id_attr = muut()->getWrapperCssId() ? 'id="' . muut()->getWrapperCssId() . '_comments"' : ''; $type = isset( $post_commenting_options['type'] ) && $post_commenting_options['type'] ? $post_commenting_options['type'] : 'flat'; $anchor = '<div id="respond"><section id="muut_comments"><a ' . $id_attr . ' class="' . muut()->getWrapperCssClass() . '" href="' . muut()->getContentPathPrefix() . 'i/' . $path . '" ' . $settings . '>' . __( 'Comments', 'muut' ) . '</a></section></div>'; $anchor = apply_filters( 'muut_comment_overrides_embed_content', $anchor, $post_id, $type ); $anchor = apply_filters( 'muut_embed_content', $anchor, $post_id, $type ); if ( $echo ) { echo $anchor; } else { return $anchor; } } /** * Filters the link for the WP get_comments_link function. Posts with Muut comments should link to * their anchor. * * @param string $link The current link. * @param int $post_id The post ID. * @return string The filtered link. * @author Paul Hughes * @since 3.0 */ public function commentsLink( $link, $post_id ) { if ( Muut_Post_Utility::isMuutCommentingPost( $post_id ) ) { $link = get_permalink( $post_id ) . '#' . muut()->getWrapperCssId() . '_comments'; } return $link; } /** * Removes the post meta store of the comment count after a given post's page is loaded (effectively clearing the cache) * Next time the post count is then requested, it will grab a fresh copy. 
* * @return void * @author Paul Hughes * @since 3.0.5 */ public function flushPostCommentCount() { if ( is_single() && Muut_Post_Utility::isMuutCommentingPost( get_the_ID() ) ) { delete_post_meta( get_the_ID(), 'muut_comments_count' ); } } /** * For posts that have Muut commenting enabled, set the number of comments to zero so that it does not * (in most themes) show a comment count, but rather sticks with "Leave a reply." * * @param int $count The current comment count. * @param int $post_id The post ID. * @return int The filtered count. * @author Paul Hughes * @since 3.0 */ public function muutCommentsNumber( $count, $post_id ) { if ( Muut_Post_Utility::isMuutCommentingPost( $post_id ) ) { // If there is no cached value, let's go get it. if ( wp_cache_get( "muut-comments-{$post_id}" , 'counts' ) === false ) { $post = get_post( $post_id ); if ( is_a( $post, 'WP_Post' ) ) { $count = get_post_meta( $post_id, 'muut_comments_count', true ); if ( $count !== '' ) { wp_cache_set( "muut-comments-{$post_id}", $count, 'counts' ); } } } $count = wp_cache_get( "muut-comments-{$post_id}" , 'counts' ); } return $count; } /** * Gets (and caches) the comment counts for posts in the main query. * * @param array $posts The array of WP_Post objects that were fetched in the main query. * @return array The same array. * @author Paul Hughes * @since 3.0 */ public function fetchCommentCountForMuutPosts() { global $wp_query; // Only execute this functionality if "do not fetch" is not set. // That filter can be used (set to true) to prevent any of this from executing. if ( !apply_filters( 'muut_do_not_fetch_post_counts', false ) && $wp_query->is_main_query() ) { $post_count_queue = array(); $posts = $wp_query->posts; foreach ( $posts as $post ) { if ( Muut_Post_Utility::isMuutCommentingPost( $post->ID ) && wp_cache_get( "muut-comments-{$post->ID}" , 'counts' ) === false && get_post_meta( $post->ID, 'muut_comments_count', true ) === '' ) { $path = '/' . 
$this->getCommentsPath( $post->ID, true ); $post_count_queue[$post->ID] = $path; } } // As long as there is at least one post that uses Muut commenting and doesn't have a cached value... if ( count( $post_count_queue ) > 0 ) { global $wp_version; $api_endpoint = 'https://' . Muut::MUUTAPISERVER . '/postcounts'; $api_args = '?path=' . join( '&path=', $post_count_queue ); $api_call = $api_endpoint . $api_args; $fetch_args = array( 'user-agent' => 'WordPress/' . $wp_version . '; ' . get_bloginfo( 'url' ) . ' MuutForum/' . muut()->getForumName(), 'timeout' => apply_filters( 'muut_api_post_counts_timeout', '2' ), ); $response = wp_remote_get( $api_call, $fetch_args ); if ( is_wp_error( $response ) && ( muut()->isInDevelopMode() || !apply_filters( 'muut_suppress_api_errors', true ) ) ) { error_log( 'Something went wrong fetching Muut API: ' . $response->get_error_message() ); } else { if ( wp_remote_retrieve_response_code( $response ) == 200 ) { $body = wp_remote_retrieve_body( $response ); $return_array = json_decode( $body ); // Cache values for each returned post comment count. if ( !is_null( $return_array ) ) { $post_array = array_flip( $post_count_queue ); foreach ( $post_array as $url => $id ) { update_post_meta( $id, 'muut_comments_count', $return_array->$url->size ); wp_cache_set( "muut-comments-{$id}", $return_array->$url->size, 'counts' ); } } } } } } return $posts; } /** * Gets the type of commenting enabled on a given commenting post (threaded or flat). * * @param int $post_id The commenting post id. * @return null|string The commenting type that is enabled or null if failed. * @author Paul Hughes * @since 3.0.2.1 */ public function getCommentingPostCommentType( $post_id ) { $post_commenting_options = Muut_Post_Utility::getPostOption( $post_id, 'commenting_settings' ); if ( isset( $post_commenting_options['type'] ) ) { return $post_commenting_options['type']; } } } }
AnduZhang/resource-center
wp-content/plugins/muut/lib/comment-overrides.class.php
PHP
gpl-2.0
12,200
<?php /** * @package HikaShop for Joomla! * @version 2.1.2 * @author hikashop.com * @copyright (C) 2010-2013 HIKARI SOFTWARE. All rights reserved. * @license GNU/GPLv3 http://www.gnu.org/licenses/gpl-3.0.html */ defined('_JEXEC') or die('Restricted access'); ?><?php if(!function_exists('curl_init')) { echo '<tr><td colspan="2"><strong>The FirstData payment plugin needs the CURL library installed but it seems that it is not available on your server. Please contact your web hosting to set it up.</strong></td></tr>'; } ?><tr> <td class="key"> <label for="data[payment][payment_params][login]"> Store ID </label> </td> <td> <input type="text" name="data[payment][payment_params][login]" value="<?php echo @$this->element->payment_params->login; ?>" /> </td> </tr> <tr> <td class="key"> <label for="data[payment][payment_params][password]"> API Password </label> </td> <td> <input type="text" name="data[payment][payment_params][password]" value="<?php echo @$this->element->payment_params->password; ?>" /> </td> </tr> <tr> <td class="key"> <label for="data[payment][payment_params][domain]"> Payment Server </label> </td> <td> <?php $values = array(); $values[] = JHTML::_('select.option', 'ws.firstdataglobalgateway.com', 'Production Server'); $values[] = JHTML::_('select.option', 'ws.merchanttest.firstdataglobalgateway.com', 'Test Server'); echo JHTML::_('select.genericlist', $values, "data[payment][payment_params][domain]" , 'class="inputbox" size="1"', 'value', 'text', @$this->element->payment_params->domain ); ?> </td> </tr> <tr> <td class="key"> <label for="data[payment][payment_params][pem_file]"> PEM file </label> </td> <td> <input type="text" name="data[payment][payment_params][pem_file]" value="<?php echo @$this->element->payment_params->pem_file; ?>" /> </td> </tr> <tr> <td class="key"> <label for="data[payment][payment_params][key_file]"> KEY file </label> </td> <td> <input type="text" name="data[payment][payment_params][key_file]" value="<?php echo 
@$this->element->payment_params->key_file; ?>" /> </td> </tr> <tr> <td class="key"> <label for="data[payment][payment_params][key_passwd]"> KEY password </label> </td> <td> <input type="text" name="data[payment][payment_params][key_passwd]" value="<?php echo @$this->element->payment_params->key_passwd; ?>" /> </td> </tr> <tr> <td class="key"> <label for="data[payment][payment_params][ask_ccv]"> Ask CCV </label> </td> <td> <?php echo JHTML::_('hikaselect.booleanlist', "data[payment][payment_params][ask_ccv]" , '',@$this->element->payment_params->ask_ccv ); ?> </td> </tr> <tr> <td class="key"> <label for="data[payment][payment_params][debug]"> <?php echo JText::_( 'DEBUG' ); ?> </label> </td> <td> <?php echo JHTML::_('hikaselect.booleanlist', "data[payment][payment_params][debug]" , '',@$this->element->payment_params->debug ); ?> </td> </tr> <tr> <td class="key"> <label for="data[payment][payment_params][return_url]"> <?php echo JText::_( 'RETURN_URL' ); ?> </label> </td> <td> <input type="text" name="data[payment][payment_params][return_url]" value="<?php echo @$this->element->payment_params->return_url; ?>" /> </td> </tr> <tr> <td class="key"> <label for="data[payment][payment_params][verified_status]"> <?php echo JText::_( 'VERIFIED_STATUS' ); ?> </label> </td> <td> <?php echo $this->data['category']->display("data[payment][payment_params][verified_status]",@$this->element->payment_params->verified_status); ?> </td> </tr>
Jasonudoo/platform
tmp/hikashop/back/extensions/plg_hikashoppayment_firstdata/firstdata_configuration.php
PHP
gpl-2.0
3,692
// // tick_count_timer.cpp // ~~~~~~~~~~~~~~~~~~~~ // // Copyright (c) 2003-2008 Christopher M. Kohlhoff (chris at kohlhoff dot com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // #include <boost/asio.hpp> #include <ctime> #include <iostream> #if defined(WIN32) # include <windows.h> #else # error This example is for Windows only! #endif struct tick_count_traits { // The time type. This type has no constructor that takes a DWORD to ensure // that the timer can only be used with relative times. class time_type { public: time_type() : ticks_(0) {} private: friend struct tick_count_traits; DWORD ticks_; }; // The duration type. class duration_type { public: duration_type() : ticks_(0) {} duration_type(DWORD ticks) : ticks_(ticks) {} private: friend struct tick_count_traits; DWORD ticks_; }; // Get the current time. static time_type now() { time_type result; result.ticks_ = ::GetTickCount(); return result; } // Add a duration to a time. static time_type add(const time_type& t, const duration_type& d) { time_type result; result.ticks_ = t.ticks_ + d.ticks_; return result; } // Subtract one time from another. static duration_type subtract(const time_type& t1, const time_type& t2) { return duration_type(t1.ticks_ - t2.ticks_); } // Test whether one time is less than another. static bool less_than(const time_type& t1, const time_type& t2) { // DWORD tick count values wrap periodically, so we'll use a heuristic that // says that if subtracting t1 from t2 yields a value smaller than 2^31, // then t1 is probably less than t2. This means that we can't handle // durations larger than 2^31, which shouldn't be a problem in practice. return (t2.ticks_ - t1.ticks_) < static_cast<DWORD>(1 << 31); } // Convert to POSIX duration type. 
static boost::posix_time::time_duration to_posix_duration( const duration_type& d) { return boost::posix_time::milliseconds(d.ticks_); } }; typedef boost::asio::basic_deadline_timer< DWORD, tick_count_traits> tick_count_timer; void handle_timeout(const boost::system::error_code&) { std::cout << "handle_timeout\n"; } int main() { try { boost::asio::io_service io_service; tick_count_timer timer(io_service, 5000); std::cout << "Starting synchronous wait\n"; timer.wait(); std::cout << "Finished synchronous wait\n"; timer.expires_from_now(5000); std::cout << "Starting asynchronous wait\n"; timer.async_wait(handle_timeout); io_service.run(); std::cout << "Finished asynchronous wait\n"; } catch (std::exception& e) { std::cout << "Exception: " << e.what() << "\n"; } return 0; }
scs/uclinux
lib/boost/boost_1_38_0/libs/asio/example/timers/tick_count_timer.cpp
C++
gpl-2.0
2,868
<!DOCTYPE html> <!--[if lt IE 7 ]><html <?php language_attributes(); ?> class="no-js ie ie6 lte7 lte8 lte9"><![endif]--> <!--[if IE 7 ]><html <?php language_attributes(); ?> class="no-js ie ie7 lte7 lte8 lte9"><![endif]--> <!--[if IE 8 ]><html <?php language_attributes(); ?> class="no-js ie ie8 lte8 lte9"><![endif]--> <!--[if IE 9 ]><html <?php language_attributes(); ?> class="no-js ie ie9 lte9"><![endif]--> <!--[if (gt IE 9)|!(IE)]><!--><html <?php language_attributes(); ?> class="no-js"><!--<![endif]--> <head> <meta charset="<?php bloginfo( 'charset' ); ?>" /> <meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1.5, minimum-scale=0.5"> <title><?php wp_title( 'by', true, 'right' ); bloginfo( 'name' ); ?></title> <link rel="profile" href="http://gmpg.org/xfn/11" /> <link rel="stylesheet" type="text/css" media="all" href="<?php bloginfo( 'stylesheet_url' ); ?>" /> <link rel="pingback" href="<?php bloginfo( 'pingback_url' ); ?>" /> <link rel="shortcut icon" href="<?php echo get_bloginfo('template_directory'); ?>/images/favicon.ico" /> <!--[if lt IE 9]> <script src="http://html5shim.googlecode.com/svn/trunk/html5.js"></script> <![endif]--> <?php wp_enqueue_script('jquery'); if ( is_singular() && get_option( 'thread_comments' ) ) wp_enqueue_script( 'comment-reply' ); $options = get_option ( 'svbtle_options' ); echo $options['google_analytics']; if( isset( $options['color'] ) && '' != $options['color'] ) $color = $options['color']; else $color = "#ff0000"; ?> <style>blockquote {border-color: <?php echo $color ?>;}figure.logo, aside.alsoby li a:hover, aside.kudo.complete span.circle {background-color: <?php echo $color ?>;}section.preview header#begin h2,ul#user_meta a:hover,nav.pagination span.next a,nav.pagination span.prev a {color: <?php echo $color ?>;}ul#user_meta a:hover,nav.pagination span.next a,nav.pagination span.prev a {border-color: <?php echo $color ?>;}::-moz-selection { background: <?php echo $color ?>; color: #fff; 
text-shadow: none;}::selection { background: <?php echo $color ?>; color: #fff; text-shadow: none;} </style> <?php wp_head(); ?> </head> <body <?php body_class(); ?>> <header id="sidebar"> <figure class="logo medium"> <a href="<?php echo home_url( '/' ); ?>"><?php bloginfo( 'name' ); ?></a> </figure> <h1><a href="<?php echo home_url( '/' ); ?>"><?php bloginfo( 'name' ); ?></a></h1> <h2><a href="<?php echo home_url( '/' ); ?>"><?php echo $options['theme_username'] ?></a></h2> <h3><?php bloginfo( 'description' ); ?></h3> <ul id="user_nav"> <?php wp_nav_menu( array( 'theme_location' => 'primary' ) ); ?> <?php if ($options['rss-link']): ?> <li class="link feed"> <a href="<?php bloginfo('rss_url'); ?>">feed</a> </li> <?php endif ?> <?php if ( is_active_sidebar( 'sidebar' ) ) dynamic_sidebar( 'sidebar' ); ?> </ul> <aside id="svbtle_linkback"> <a href="https://github.com/gravityonmars/wp-svbtle"> <span class="logo_square"><span class="logo_circle">&nbsp;</span></span>&nbsp;<span class="svbtle">wp-svbtle</span> </a> </aside> </header> <section id="river" role="main"> <?php if (isset($_GET['not_found'])): ?> <div id="notice"><span>:(</span><br><br>Not found.</div> <?php endif; ?>
lcdvirgo/htdocs
wp-content/themes/svbtle/header.php
PHP
gpl-2.0
3,417
########################################################################### # # This program is part of Zenoss Core, an open source monitoring platform. # Copyright (C) 2009, Zenoss Inc. # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License version 2 as published by # the Free Software Foundation. # # For complete information please visit: http://www.zenoss.com/oss/ # ########################################################################### __doc__="""Esx Plugin to gather information about virtual machines running under a VMWare ESX server v3.0 """ import Globals from Products.DataCollector.plugins.CollectorPlugin \ import SnmpPlugin, GetTableMap from Products.DataCollector.plugins.DataMaps \ import ObjectMap class Esx(SnmpPlugin): # compname = "os" relname = "guestDevices" modname = 'ZenPacks.zenoss.ZenossVirtualHostMonitor.VirtualMachine' columns = { '.1': 'snmpindex', '.2': 'displayName', '.4': 'osType', '.5': 'memory', '.6': 'adminStatus', '.7': 'vmid', '.8': 'operStatus', } snmpGetTableMaps = ( GetTableMap('vminfo', '.1.3.6.1.4.1.6876.2.1.1', columns), ) def process(self, device, results, log): log.info('processing %s for device %s', self.name(), device.id) getdata, tabledata = results table = tabledata.get("vminfo") rm = self.relMap() for info in table.values(): info['adminStatus'] = info['adminStatus'] == 'poweredOn' info['operStatus'] = info['operStatus'] == 'running' info['snmpindex'] = info['vmid'] del info['vmid'] om = self.objectMap(info) om.id = self.prepId(om.displayName) rm.append(om) return [rm]
zenoss/ZenPacks.community.VMwareESXMonitor
ZenPacks/community/VMwareESXMonitor/modeler/plugins/zenoss/snmp/Esx.py
Python
gpl-2.0
1,873
#ifndef UTIL_TIME_HPP #define UTIL_TIME_HPP #include <boost/date_time/posix_time/posix_time_types.hpp> // parse a time string (ISO 8601 - YYYY-MM-DDTHH:MM:SSZ) boost::posix_time::ptime parse_time(const std::string &); #endif /* UTIL_TIME_HPP */
openstreetmap/cgimap
include/cgimap/time.hpp
C++
gpl-2.0
248
/************************************************************************************** * Copyright (C) 2008 EsperTech, Inc. All rights reserved. * * http://esper.codehaus.org * * http://www.espertech.com * * ---------------------------------------------------------------------------------- * * The software in this package is published under the terms of the GPL license * * a copy of which has been included with this distribution in the license.txt file. * **************************************************************************************/ package com.espertech.esper.epl.spec; import java.util.List; import java.util.ArrayList; /** * Specification for the on-set statement. */ public class OnTriggerSetDesc extends OnTriggerDesc { private List<OnTriggerSetAssignment> assignments; private static final long serialVersionUID = -5104683353293495487L; /** * Ctor. * @param assignments is a list of assignments */ public OnTriggerSetDesc(List<OnTriggerSetAssignment> assignments) { super(OnTriggerType.ON_SET); this.assignments = assignments; } /** * Returns a list of all variables assignment by the on-set * @return list of assignments */ public List<OnTriggerSetAssignment> getAssignments() { return assignments; } }
mobile-event-processing/Asper
source/src/com/espertech/esper/epl/spec/OnTriggerSetDesc.java
Java
gpl-2.0
1,522
# CMAKE generated file: DO NOT EDIT! # Generated by "Unix Makefiles" Generator, CMake Version 3.3 # Relative path conversion top directories. set(CMAKE_RELATIVE_PATH_TOP_SOURCE "/Users/husseinalzandvirtual/linphone-iphone/submodules/belle-sip") set(CMAKE_RELATIVE_PATH_TOP_BINARY "/Users/husseinalzandvirtual/linphone-iphone/WORK/ios-x86_64/Build/bellesip") # Force unix paths in dependencies. set(CMAKE_FORCE_UNIX_PATHS 1) # The C and CXX include file regular expressions for this directory. set(CMAKE_C_INCLUDE_REGEX_SCAN "^.*$") set(CMAKE_C_INCLUDE_REGEX_COMPLAIN "^$") set(CMAKE_CXX_INCLUDE_REGEX_SCAN ${CMAKE_C_INCLUDE_REGEX_SCAN}) set(CMAKE_CXX_INCLUDE_REGEX_COMPLAIN ${CMAKE_C_INCLUDE_REGEX_COMPLAIN})
husseinalzand/linphone
WORK/ios-x86_64/Build/bellesip/src/CMakeFiles/CMakeDirectoryInformation.cmake
CMake
gpl-2.0
713