staging/lustre/mdc: move mdc-specific procfs files to sysfs
author     Oleg Drokin <green@linuxhacker.ru>
           Thu, 21 May 2015 19:32:20 +0000 (15:32 -0400)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Sun, 31 May 2015 02:18:24 +0000 (11:18 +0900)
This moves max_rpcs_in_flight and max_pages_per_rpc to
/sys/fs/lustre/mdc/.../

Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/lustre/lustre/mdc/lproc_mdc.c
drivers/staging/lustre/sysfs-fs-lustre

index b1ef178b6f2fc5ae50268e034cd54a82e5fc8947..2121ca761a26e18bff6264a9e0ddc21e54fbc912 100644 (file)
 #include "../include/lprocfs_status.h"
 #include "mdc_internal.h"
 
-static int mdc_max_rpcs_in_flight_seq_show(struct seq_file *m, void *v)
+static ssize_t max_rpcs_in_flight_show(struct kobject *kobj,
+                                      struct attribute *attr,
+                                      char *buf)
 {
-       struct obd_device *dev = m->private;
+       int len;
+       struct obd_device *dev = container_of(kobj, struct obd_device,
+                                             obd_kobj);
        struct client_obd *cli = &dev->u.cli;
 
        client_obd_list_lock(&cli->cl_loi_list_lock);
-       seq_printf(m, "%u\n", cli->cl_max_rpcs_in_flight);
+       len = sprintf(buf, "%u\n", cli->cl_max_rpcs_in_flight);
        client_obd_list_unlock(&cli->cl_loi_list_lock);
 
-       return 0;
+       return len;
 }
 
-static ssize_t mdc_max_rpcs_in_flight_seq_write(struct file *file,
-                                               const char __user *buffer,
-                                               size_t count,
-                                               loff_t *off)
+static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
+                                       struct attribute *attr,
+                                       const char *buffer,
+                                       size_t count)
 {
-       struct obd_device *dev =
-                       ((struct seq_file *)file->private_data)->private;
+       struct obd_device *dev = container_of(kobj, struct obd_device,
+                                             obd_kobj);
        struct client_obd *cli = &dev->u.cli;
-       int val, rc;
+       int rc;
+       unsigned long val;
 
-       rc = lprocfs_write_helper(buffer, count, &val);
+       rc = kstrtoul(buffer, 10, &val);
        if (rc)
                return rc;
 
@@ -75,7 +80,7 @@ static ssize_t mdc_max_rpcs_in_flight_seq_write(struct file *file,
 
        return count;
 }
-LPROC_SEQ_FOPS(mdc_max_rpcs_in_flight);
+LUSTRE_RW_ATTR(max_rpcs_in_flight);
 
 static int mdc_kuc_open(struct inode *inode, struct file *file)
 {
@@ -161,11 +166,23 @@ LPROC_SEQ_FOPS_RO_TYPE(mdc, conn_uuid);
 LPROC_SEQ_FOPS_RO_TYPE(mdc, timeouts);
 LPROC_SEQ_FOPS_RO_TYPE(mdc, state);
 
-static int mdc_obd_max_pages_per_rpc_seq_show(struct seq_file *m, void *v)
+/*
+ * Note: the sysfs entry below is provided but not currently used;
+ * sbi->sb_md_brw_size is used instead. The per-obd variable should be
+ * used when DNE is enabled, and dir pages are managed in the MDC layer.
+ * Don't forget to enable the sysfs store function then.
+ */
+static ssize_t max_pages_per_rpc_show(struct kobject *kobj,
+                                     struct attribute *attr,
+                                     char *buf)
 {
-       return lprocfs_obd_rd_max_pages_per_rpc(m, m->private);
+       struct obd_device *dev = container_of(kobj, struct obd_device,
+                                             obd_kobj);
+       struct client_obd *cli = &dev->u.cli;
+
+       return sprintf(buf, "%d\n", cli->cl_max_pages_per_rpc);
 }
-LPROC_SEQ_FOPS_RO(mdc_obd_max_pages_per_rpc);
+LUSTRE_RO_ATTR(max_pages_per_rpc);
 
 LPROC_SEQ_FOPS_RW_TYPE(mdc, import);
 LPROC_SEQ_FOPS_RW_TYPE(mdc, pinger_recov);
@@ -176,14 +193,6 @@ static struct lprocfs_vars lprocfs_mdc_obd_vars[] = {
        /*{ "filegroups",       lprocfs_rd_filegroups,          NULL, 0 },*/
        { "mds_server_uuid",    &mdc_server_uuid_fops,          NULL, 0 },
        { "mds_conn_uuid",      &mdc_conn_uuid_fops,            NULL, 0 },
-       /*
-        * FIXME: below proc entry is provided, but not in used, instead
-        * sbi->sb_md_brw_size is used, the per obd variable should be used
-        * when CMD is enabled, and dir pages are managed in MDC layer.
-        * Remember to enable proc write function.
-        */
-       { "max_pages_per_rpc",  &mdc_obd_max_pages_per_rpc_fops, NULL, 0 },
-       { "max_rpcs_in_flight", &mdc_max_rpcs_in_flight_fops,   NULL, 0 },
        { "timeouts",           &mdc_timeouts_fops,             NULL, 0 },
        { "import",             &mdc_import_fops,               NULL, 0 },
        { "state",              &mdc_state_fops,                NULL, 0 },
@@ -192,7 +201,18 @@ static struct lprocfs_vars lprocfs_mdc_obd_vars[] = {
        { NULL }
 };
 
+static struct attribute *mdc_attrs[] = {
+       &lustre_attr_max_rpcs_in_flight.attr,
+       &lustre_attr_max_pages_per_rpc.attr,
+       NULL,
+};
+
+static struct attribute_group mdc_attr_group = {
+       .attrs = mdc_attrs,
+};
+
 void lprocfs_mdc_init_vars(struct lprocfs_static_vars *lvars)
 {
+       lvars->sysfs_vars   = &mdc_attr_group;
        lvars->obd_vars     = lprocfs_mdc_obd_vars;
 }
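
For context, the LUSTRE_RW_ATTR() and LUSTRE_RO_ATTR() helpers used above are
assumed to wrap each tunable in a struct lustre_attr whose show/store
callbacks are dispatched through the obd kobject's sysfs_ops. A minimal
sketch of the presumed expansion (the real definitions live in
lprocfs_status.h and may differ):

/* Hypothetical sketch, not the verbatim lustre headers. */
struct lustre_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
			char *buf);
	ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
			 const char *buf, size_t len);
};

/* Names the sysfs file after the show/store pair, matching
 * max_rpcs_in_flight_show()/_store() in this patch. */
#define LUSTRE_ATTR(name, mode, show, store)				\
static struct lustre_attr lustre_attr_##name = __ATTR(name, mode, show, store)

#define LUSTRE_RW_ATTR(name) LUSTRE_ATTR(name, 0644, name##_show, name##_store)
#define LUSTRE_RO_ATTR(name) LUSTRE_ATTR(name, 0444, name##_show, NULL)
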
index 61101f272a39ef50f17817b21fd940229d30b60e..a99078c25d6a81e7b2fe255af8c8efcd3287d8a4 100644 (file)
@@ -384,3 +384,19 @@ Description:
                Number of free inodes on backend filesystem for service
                behind this obd.
 
+What:          /sys/fs/lustre/mdc/{connection_name}/max_pages_per_rpc
+Date:          May 2015
+Contact:       "Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+               Maximum number of readdir pages to fit into a single readdir
+               RPC.
+
+What:          /sys/fs/lustre/mdc/{connection_name}/max_rpcs_in_flight
+Date:          May 2015
+Contact:       "Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+               Maximum number of RPCs allowed in parallel on the wire
+               for this connection. Increasing this number may help on
+               higher-latency links, but risks overloading the server
+               if too many clients raise it.
+               Default: 8
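
The sysfs_vars group installed by lprocfs_mdc_init_vars() is presumably
attached to the obd kobject during device setup; a minimal sketch of that
wiring follows (the helper name and call site are assumptions, not part of
this patch):

/* Hedged sketch: attach the mdc attribute group to the obd kobject.
 * dev->obd_kobj must already be live (kobject_init_and_add) so that the
 * container_of() calls in the show/store handlers above are valid. */
static int mdc_sysfs_setup(struct obd_device *dev,
			   const struct lprocfs_static_vars *lvars)
{
	if (!lvars->sysfs_vars)
		return 0;

	/* Creates /sys/fs/lustre/mdc/{connection_name}/max_rpcs_in_flight
	 * and .../max_pages_per_rpc from mdc_attr_group. */
	return sysfs_create_group(&dev->obd_kobj, lvars->sysfs_vars);
}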