/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #include <linux/device.h>
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/mutex.h>
17 #include <linux/err.h>
18 #include <linux/rbtree.h>
19 #include <linux/sched.h>
21 #define CREATE_TRACE_POINTS
22 #include <trace/events/regmap.h>
/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
/* Forward declarations for internal helpers defined later in this file. */
34 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
35 unsigned int mask, unsigned int val,
38 static int _regmap_bus_read(void *context, unsigned int reg,
40 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
42 static int _regmap_bus_raw_write(void *context, unsigned int reg,
45 bool regmap_reg_in_ranges(unsigned int reg,
46 const struct regmap_range *ranges,
49 const struct regmap_range *r;
52 for (i = 0, r = ranges; i < nranges; i++, r++)
53 if (regmap_reg_in_range(reg, r))
57 EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
59 bool regmap_check_range_table(struct regmap *map, unsigned int reg,
60 const struct regmap_access_table *table)
62 /* Check "no ranges" first */
63 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
66 /* In case zero "yes ranges" are supplied, any reg is OK */
67 if (!table->n_yes_ranges)
70 return regmap_reg_in_ranges(reg, table->yes_ranges,
73 EXPORT_SYMBOL_GPL(regmap_check_range_table);
75 bool regmap_writeable(struct regmap *map, unsigned int reg)
77 if (map->max_register && reg > map->max_register)
80 if (map->writeable_reg)
81 return map->writeable_reg(map->dev, reg);
84 return regmap_check_range_table(map, reg, map->wr_table);
89 bool regmap_readable(struct regmap *map, unsigned int reg)
91 if (map->max_register && reg > map->max_register)
94 if (map->format.format_write)
97 if (map->readable_reg)
98 return map->readable_reg(map->dev, reg);
101 return regmap_check_range_table(map, reg, map->rd_table);
/*
 * regmap_volatile() - true if @reg must always be accessed on the
 * hardware rather than served from the register cache.
 * Precedence: readability, then the volatile_reg() callback, then the
 * volatile access table.
 * NOTE(review): the fallback tail of this function (behaviour when
 * neither callback nor table is set) is not visible in this excerpt.
 */
106 bool regmap_volatile(struct regmap *map, unsigned int reg)
108 if (!regmap_readable(map, reg))
111 if (map->volatile_reg)
112 return map->volatile_reg(map->dev, reg);
114 if (map->volatile_table)
115 return regmap_check_range_table(map, reg, map->volatile_table);
123 bool regmap_precious(struct regmap *map, unsigned int reg)
125 if (!regmap_readable(map, reg))
128 if (map->precious_reg)
129 return map->precious_reg(map->dev, reg);
131 if (map->precious_table)
132 return regmap_check_range_table(map, reg, map->precious_table);
137 static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
142 for (i = 0; i < num; i++)
143 if (!regmap_volatile(map, reg + i))
149 static void regmap_format_2_6_write(struct regmap *map,
150 unsigned int reg, unsigned int val)
152 u8 *out = map->work_buf;
154 *out = (reg << 6) | val;
157 static void regmap_format_4_12_write(struct regmap *map,
158 unsigned int reg, unsigned int val)
160 __be16 *out = map->work_buf;
161 *out = cpu_to_be16((reg << 12) | val);
164 static void regmap_format_7_9_write(struct regmap *map,
165 unsigned int reg, unsigned int val)
167 __be16 *out = map->work_buf;
168 *out = cpu_to_be16((reg << 9) | val);
171 static void regmap_format_10_14_write(struct regmap *map,
172 unsigned int reg, unsigned int val)
174 u8 *out = map->work_buf;
177 out[1] = (val >> 8) | (reg << 6);
181 static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
188 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
192 b[0] = cpu_to_be16(val << shift);
195 static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
199 b[0] = cpu_to_le16(val << shift);
202 static void regmap_format_16_native(void *buf, unsigned int val,
205 *(u16 *)buf = val << shift;
208 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
219 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
223 b[0] = cpu_to_be32(val << shift);
226 static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
230 b[0] = cpu_to_le32(val << shift);
233 static void regmap_format_32_native(void *buf, unsigned int val,
236 *(u32 *)buf = val << shift;
239 static void regmap_parse_inplace_noop(void *buf)
243 static unsigned int regmap_parse_8(const void *buf)
250 static unsigned int regmap_parse_16_be(const void *buf)
252 const __be16 *b = buf;
254 return be16_to_cpu(b[0]);
257 static unsigned int regmap_parse_16_le(const void *buf)
259 const __le16 *b = buf;
261 return le16_to_cpu(b[0]);
264 static void regmap_parse_16_be_inplace(void *buf)
268 b[0] = be16_to_cpu(b[0]);
271 static void regmap_parse_16_le_inplace(void *buf)
275 b[0] = le16_to_cpu(b[0]);
278 static unsigned int regmap_parse_16_native(const void *buf)
283 static unsigned int regmap_parse_24(const void *buf)
286 unsigned int ret = b[2];
287 ret |= ((unsigned int)b[1]) << 8;
288 ret |= ((unsigned int)b[0]) << 16;
293 static unsigned int regmap_parse_32_be(const void *buf)
295 const __be32 *b = buf;
297 return be32_to_cpu(b[0]);
300 static unsigned int regmap_parse_32_le(const void *buf)
302 const __le32 *b = buf;
304 return le32_to_cpu(b[0]);
307 static void regmap_parse_32_be_inplace(void *buf)
311 b[0] = be32_to_cpu(b[0]);
314 static void regmap_parse_32_le_inplace(void *buf)
318 b[0] = le32_to_cpu(b[0]);
321 static unsigned int regmap_parse_32_native(const void *buf)
326 static void regmap_lock_mutex(void *__map)
328 struct regmap *map = __map;
329 mutex_lock(&map->mutex);
332 static void regmap_unlock_mutex(void *__map)
334 struct regmap *map = __map;
335 mutex_unlock(&map->mutex);
338 static void regmap_lock_spinlock(void *__map)
339 __acquires(&map->spinlock)
341 struct regmap *map = __map;
344 spin_lock_irqsave(&map->spinlock, flags);
345 map->spinlock_flags = flags;
348 static void regmap_unlock_spinlock(void *__map)
349 __releases(&map->spinlock)
351 struct regmap *map = __map;
352 spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}
364 static bool _regmap_range_add(struct regmap *map,
365 struct regmap_range_node *data)
367 struct rb_root *root = &map->range_tree;
368 struct rb_node **new = &(root->rb_node), *parent = NULL;
371 struct regmap_range_node *this =
372 container_of(*new, struct regmap_range_node, node);
375 if (data->range_max < this->range_min)
376 new = &((*new)->rb_left);
377 else if (data->range_min > this->range_max)
378 new = &((*new)->rb_right);
383 rb_link_node(&data->node, parent, new);
384 rb_insert_color(&data->node, root);
389 static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
392 struct rb_node *node = map->range_tree.rb_node;
395 struct regmap_range_node *this =
396 container_of(node, struct regmap_range_node, node);
398 if (reg < this->range_min)
399 node = node->rb_left;
400 else if (reg > this->range_max)
401 node = node->rb_right;
409 static void regmap_range_exit(struct regmap *map)
411 struct rb_node *next;
412 struct regmap_range_node *range_node;
414 next = rb_first(&map->range_tree);
416 range_node = rb_entry(next, struct regmap_range_node, node);
417 next = rb_next(&range_node->node);
418 rb_erase(&range_node->node, &map->range_tree);
422 kfree(map->selector_work_buf);
/*
 * regmap_attach_dev() - bind an initialised @map to @dev: create the
 * debugfs entries and register a devres record so dev_get_regmap()
 * can later retrieve the map from the device.
 * NOTE(review): the devres_add() call and the error-return paths are
 * not visible in this excerpt; on allocation failure the debugfs
 * entry appears to be torn down again (line 437).
 */
425 int regmap_attach_dev(struct device *dev, struct regmap *map,
426 const struct regmap_config *config)
432 regmap_debugfs_init(map, config->name);
434 /* Add a devres resource for dev_get_regmap() */
435 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
/* Failure path: undo the debugfs registration done above. */
437 regmap_debugfs_exit(map);
445 EXPORT_SYMBOL_GPL(regmap_attach_dev);
448 * regmap_init(): Initialise register map
450 * @dev: Device that will be interacted with
451 * @bus: Bus-specific callbacks to use with device
452 * @bus_context: Data passed to bus-specific callbacks
453 * @config: Configuration for register map
455 * The return value will be an ERR_PTR() on error or a valid pointer to
456 * a struct regmap. This function should generally not be called
457 * directly, it should be called by bus-specific init functions.
/*
 * regmap_init() - allocate and initialise a struct regmap.
 * NOTE(review): this excerpt is sampled — allocation checks, braces,
 * switch cases and error-unwind paths are missing; comments below only
 * describe what the visible lines show.
 */
459 struct regmap *regmap_init(struct device *dev,
460 const struct regmap_bus *bus,
462 const struct regmap_config *config)
466 enum regmap_endian reg_endian, val_endian;
472 map = kzalloc(sizeof(*map), GFP_KERNEL);
/* Locking: caller-supplied callbacks win; fast_io buses get a
 * spinlock, everything else a mutex. */
478 if (config->lock && config->unlock) {
479 map->lock = config->lock;
480 map->unlock = config->unlock;
481 map->lock_arg = config->lock_arg;
483 if ((bus && bus->fast_io) ||
485 spin_lock_init(&map->spinlock);
486 map->lock = regmap_lock_spinlock;
487 map->unlock = regmap_unlock_spinlock;
489 mutex_init(&map->mutex);
490 map->lock = regmap_lock_mutex;
491 map->unlock = regmap_unlock_mutex;
/* Derive the wire-buffer layout from the configured bit widths. */
495 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
496 map->format.pad_bytes = config->pad_bits / 8;
497 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
498 map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
499 config->val_bits + config->pad_bits, 8);
500 map->reg_shift = config->pad_bits % 8;
501 if (config->reg_stride)
502 map->reg_stride = config->reg_stride;
505 map->use_single_rw = config->use_single_rw;
506 map->can_multi_write = config->can_multi_write;
/* Copy access tables, per-register callbacks and cache settings. */
509 map->bus_context = bus_context;
510 map->max_register = config->max_register;
511 map->wr_table = config->wr_table;
512 map->rd_table = config->rd_table;
513 map->volatile_table = config->volatile_table;
514 map->precious_table = config->precious_table;
515 map->writeable_reg = config->writeable_reg;
516 map->readable_reg = config->readable_reg;
517 map->volatile_reg = config->volatile_reg;
518 map->precious_reg = config->precious_reg;
519 map->cache_type = config->cache_type;
520 map->name = config->name;
/* Bookkeeping for asynchronous writes. */
522 spin_lock_init(&map->async_lock);
523 INIT_LIST_HEAD(&map->async_list);
524 INIT_LIST_HEAD(&map->async_free);
525 init_waitqueue_head(&map->async_waitq);
/* Flag masks: explicit config values win over the bus defaults. */
527 if (config->read_flag_mask || config->write_flag_mask) {
528 map->read_flag_mask = config->read_flag_mask;
529 map->write_flag_mask = config->write_flag_mask;
531 map->read_flag_mask = bus->read_flag_mask;
/* Maps with their own reg_read/reg_write skip all wire formatting. */
535 map->reg_read = config->reg_read;
536 map->reg_write = config->reg_write;
538 map->defer_caching = false;
539 goto skip_format_initialization;
541 map->reg_read = _regmap_bus_read;
/* Resolve endianness: config overrides bus default; fall back to BE. */
544 reg_endian = config->reg_format_endian;
545 if (reg_endian == REGMAP_ENDIAN_DEFAULT)
546 reg_endian = bus->reg_format_endian_default;
547 if (reg_endian == REGMAP_ENDIAN_DEFAULT)
548 reg_endian = REGMAP_ENDIAN_BIG;
550 val_endian = config->val_format_endian;
551 if (val_endian == REGMAP_ENDIAN_DEFAULT)
552 val_endian = bus->val_format_endian_default;
553 if (val_endian == REGMAP_ENDIAN_DEFAULT)
554 val_endian = REGMAP_ENDIAN_BIG;
/* Pick a register-formatting helper keyed on the total register
 * width (reg_bits plus the pad-induced shift). */
556 switch (config->reg_bits + map->reg_shift) {
558 switch (config->val_bits) {
560 map->format.format_write = regmap_format_2_6_write;
568 switch (config->val_bits) {
570 map->format.format_write = regmap_format_4_12_write;
578 switch (config->val_bits) {
580 map->format.format_write = regmap_format_7_9_write;
588 switch (config->val_bits) {
590 map->format.format_write = regmap_format_10_14_write;
598 map->format.format_reg = regmap_format_8;
602 switch (reg_endian) {
603 case REGMAP_ENDIAN_BIG:
604 map->format.format_reg = regmap_format_16_be;
606 case REGMAP_ENDIAN_NATIVE:
607 map->format.format_reg = regmap_format_16_native;
/* 24-bit registers only support big-endian layout. */
615 if (reg_endian != REGMAP_ENDIAN_BIG)
617 map->format.format_reg = regmap_format_24;
621 switch (reg_endian) {
622 case REGMAP_ENDIAN_BIG:
623 map->format.format_reg = regmap_format_32_be;
625 case REGMAP_ENDIAN_NATIVE:
626 map->format.format_reg = regmap_format_32_native;
/* Now the value formatting/parsing helpers, by width and endianness. */
637 if (val_endian == REGMAP_ENDIAN_NATIVE)
638 map->format.parse_inplace = regmap_parse_inplace_noop;
640 switch (config->val_bits) {
642 map->format.format_val = regmap_format_8;
643 map->format.parse_val = regmap_parse_8;
644 map->format.parse_inplace = regmap_parse_inplace_noop;
647 switch (val_endian) {
648 case REGMAP_ENDIAN_BIG:
649 map->format.format_val = regmap_format_16_be;
650 map->format.parse_val = regmap_parse_16_be;
651 map->format.parse_inplace = regmap_parse_16_be_inplace;
653 case REGMAP_ENDIAN_LITTLE:
654 map->format.format_val = regmap_format_16_le;
655 map->format.parse_val = regmap_parse_16_le;
656 map->format.parse_inplace = regmap_parse_16_le_inplace;
658 case REGMAP_ENDIAN_NATIVE:
659 map->format.format_val = regmap_format_16_native;
660 map->format.parse_val = regmap_parse_16_native;
/* 24-bit values, like 24-bit registers, are big-endian only. */
667 if (val_endian != REGMAP_ENDIAN_BIG)
669 map->format.format_val = regmap_format_24;
670 map->format.parse_val = regmap_parse_24;
673 switch (val_endian) {
674 case REGMAP_ENDIAN_BIG:
675 map->format.format_val = regmap_format_32_be;
676 map->format.parse_val = regmap_parse_32_be;
677 map->format.parse_inplace = regmap_parse_32_be_inplace;
679 case REGMAP_ENDIAN_LITTLE:
680 map->format.format_val = regmap_format_32_le;
681 map->format.parse_val = regmap_parse_32_le;
682 map->format.parse_inplace = regmap_parse_32_le_inplace;
684 case REGMAP_ENDIAN_NATIVE:
685 map->format.format_val = regmap_format_32_native;
686 map->format.parse_val = regmap_parse_32_native;
/* format_write() packed layouts only support big-endian and
 * single-register transactions. */
694 if (map->format.format_write) {
695 if ((reg_endian != REGMAP_ENDIAN_BIG) ||
696 (val_endian != REGMAP_ENDIAN_BIG))
698 map->use_single_rw = true;
/* Bail out if no usable formatting combination was found. */
701 if (!map->format.format_write &&
702 !(map->format.format_reg && map->format.format_val))
705 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
706 if (map->work_buf == NULL) {
/* Route writes through the matching bus-level helper; raw writes can
 * defer caching to the raw path. */
711 if (map->format.format_write) {
712 map->defer_caching = false;
713 map->reg_write = _regmap_bus_formatted_write;
714 } else if (map->format.format_val) {
715 map->defer_caching = true;
716 map->reg_write = _regmap_bus_raw_write;
719 skip_format_initialization:
/* Validate and register the user-supplied indirect (paged) ranges. */
721 map->range_tree = RB_ROOT;
722 for (i = 0; i < config->num_ranges; i++) {
723 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
724 struct regmap_range_node *new;
727 if (range_cfg->range_max < range_cfg->range_min) {
728 dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
729 range_cfg->range_max, range_cfg->range_min);
733 if (range_cfg->range_max > map->max_register) {
734 dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
735 range_cfg->range_max, map->max_register);
739 if (range_cfg->selector_reg > map->max_register) {
741 "Invalid range %d: selector out of map\n", i);
745 if (range_cfg->window_len == 0) {
746 dev_err(map->dev, "Invalid range %d: window_len 0\n",
751 /* Make sure, that this register range has no selector
752 or data window within its boundary */
753 for (j = 0; j < config->num_ranges; j++) {
754 unsigned sel_reg = config->ranges[j].selector_reg;
755 unsigned win_min = config->ranges[j].window_start;
756 unsigned win_max = win_min +
757 config->ranges[j].window_len - 1;
759 /* Allow data window inside its own virtual range */
763 if (range_cfg->range_min <= sel_reg &&
764 sel_reg <= range_cfg->range_max) {
766 "Range %d: selector for %d in window\n",
771 if (!(win_max < range_cfg->range_min ||
772 win_min > range_cfg->range_max)) {
774 "Range %d: window for %d in window\n",
780 new = kzalloc(sizeof(*new), GFP_KERNEL);
787 new->name = range_cfg->name;
788 new->range_min = range_cfg->range_min;
789 new->range_max = range_cfg->range_max;
790 new->selector_reg = range_cfg->selector_reg;
791 new->selector_mask = range_cfg->selector_mask;
792 new->selector_shift = range_cfg->selector_shift;
793 new->window_start = range_cfg->window_start;
794 new->window_len = range_cfg->window_len;
796 if (!_regmap_range_add(map, new)) {
797 dev_err(map->dev, "Failed to add range %d\n", i);
/* Lazily allocate the shared selector scratch buffer, used while
 * switching pages so the main work_buf is not clobbered. */
802 if (map->selector_work_buf == NULL) {
803 map->selector_work_buf =
804 kzalloc(map->format.buf_size, GFP_KERNEL);
805 if (map->selector_work_buf == NULL) {
/* Finally bring up the register cache and attach to the device
 * (error unwinding elided in this excerpt). */
812 ret = regcache_init(map, config);
817 ret = regmap_attach_dev(dev, map, config);
827 regmap_range_exit(map);
828 kfree(map->work_buf);
834 EXPORT_SYMBOL_GPL(regmap_init);
/* devres destructor: tear down the managed regmap on device removal. */
static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}
842 * devm_regmap_init(): Initialise managed register map
844 * @dev: Device that will be interacted with
845 * @bus: Bus-specific callbacks to use with device
846 * @bus_context: Data passed to bus-specific callbacks
847 * @config: Configuration for register map
849 * The return value will be an ERR_PTR() on error or a valid pointer
850 * to a struct regmap. This function should generally not be called
851 * directly, it should be called by bus-specific init functions. The
852 * map will be automatically freed by the device management code.
854 struct regmap *devm_regmap_init(struct device *dev,
855 const struct regmap_bus *bus,
857 const struct regmap_config *config)
859 struct regmap **ptr, *regmap;
861 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
863 return ERR_PTR(-ENOMEM);
865 regmap = regmap_init(dev, bus, bus_context, config);
866 if (!IS_ERR(regmap)) {
868 devres_add(dev, ptr);
875 EXPORT_SYMBOL_GPL(devm_regmap_init);
877 static void regmap_field_init(struct regmap_field *rm_field,
878 struct regmap *regmap, struct reg_field reg_field)
880 int field_bits = reg_field.msb - reg_field.lsb + 1;
881 rm_field->regmap = regmap;
882 rm_field->reg = reg_field.reg;
883 rm_field->shift = reg_field.lsb;
884 rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
885 rm_field->id_size = reg_field.id_size;
886 rm_field->id_offset = reg_field.id_offset;
890 * devm_regmap_field_alloc(): Allocate and initialise a register field
893 * @dev: Device that will be interacted with
894 * @regmap: regmap bank in which this register field is located.
895 * @reg_field: Register field with in the bank.
897 * The return value will be an ERR_PTR() on error or a valid pointer
898 * to a struct regmap_field. The regmap_field will be automatically freed
899 * by the device management code.
901 struct regmap_field *devm_regmap_field_alloc(struct device *dev,
902 struct regmap *regmap, struct reg_field reg_field)
904 struct regmap_field *rm_field = devm_kzalloc(dev,
905 sizeof(*rm_field), GFP_KERNEL);
907 return ERR_PTR(-ENOMEM);
909 regmap_field_init(rm_field, regmap, reg_field);
914 EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
917 * devm_regmap_field_free(): Free register field allocated using
918 * devm_regmap_field_alloc. Usally drivers need not call this function,
919 * as the memory allocated via devm will be freed as per device-driver
922 * @dev: Device that will be interacted with
923 * @field: regmap field which should be freed.
/* Explicitly release a devm-allocated regmap_field ahead of unbind. */
void devm_regmap_field_free(struct device *dev,
			    struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);
933 * regmap_field_alloc(): Allocate and initialise a register field
936 * @regmap: regmap bank in which this register field is located.
937 * @reg_field: Register field with in the bank.
939 * The return value will be an ERR_PTR() on error or a valid pointer
940 * to a struct regmap_field. The regmap_field should be freed by the
941 * user once its finished working with it using regmap_field_free().
943 struct regmap_field *regmap_field_alloc(struct regmap *regmap,
944 struct reg_field reg_field)
946 struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
949 return ERR_PTR(-ENOMEM);
951 regmap_field_init(rm_field, regmap, reg_field);
955 EXPORT_SYMBOL_GPL(regmap_field_alloc);
958 * regmap_field_free(): Free register field allocated using regmap_field_alloc
960 * @field: regmap field which should be freed.
/* Release a field obtained from regmap_field_alloc(). */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);
969 * regmap_reinit_cache(): Reinitialise the current register cache
971 * @map: Register map to operate on.
972 * @config: New configuration. Only the cache data will be used.
974 * Discard any existing register cache for the map and initialize a
975 * new cache. This can be used to restore the cache to defaults or to
976 * update the cache configuration to reflect runtime discovery of the
979 * No explicit locking is done here, the user needs to ensure that
980 * this function will not race with other calls to regmap.
982 int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
985 regmap_debugfs_exit(map);
987 map->max_register = config->max_register;
988 map->writeable_reg = config->writeable_reg;
989 map->readable_reg = config->readable_reg;
990 map->volatile_reg = config->volatile_reg;
991 map->precious_reg = config->precious_reg;
992 map->cache_type = config->cache_type;
994 regmap_debugfs_init(map, config->name);
996 map->cache_bypass = false;
997 map->cache_only = false;
999 return regcache_init(map, config);
1001 EXPORT_SYMBOL_GPL(regmap_reinit_cache);
1004 * regmap_exit(): Free a previously allocated register map
1006 void regmap_exit(struct regmap *map)
1008 struct regmap_async *async;
1011 regmap_debugfs_exit(map);
1012 regmap_range_exit(map);
1013 if (map->bus && map->bus->free_context)
1014 map->bus->free_context(map->bus_context);
1015 kfree(map->work_buf);
1016 while (!list_empty(&map->async_free)) {
1017 async = list_first_entry_or_null(&map->async_free,
1018 struct regmap_async,
1020 list_del(&async->list);
1021 kfree(async->work_buf);
1026 EXPORT_SYMBOL_GPL(regmap_exit);
1028 static int dev_get_regmap_match(struct device *dev, void *res, void *data)
1030 struct regmap **r = res;
1036 /* If the user didn't specify a name match any */
1038 return (*r)->name == data;
1044 * dev_get_regmap(): Obtain the regmap (if any) for a device
1046 * @dev: Device to retrieve the map for
1047 * @name: Optional name for the register map, usually NULL.
1049 * Returns the regmap for the device if one is present, or NULL. If
1050 * name is specified then it must match the name specified when
1051 * registering the device, if it is NULL then the first regmap found
1052 * will be used. Devices with multiple register maps are very rare,
1053 * generic code should normally not need to specify a name.
1055 struct regmap *dev_get_regmap(struct device *dev, const char *name)
1057 struct regmap **r = devres_find(dev, dev_get_regmap_release,
1058 dev_get_regmap_match, (void *)name);
1064 EXPORT_SYMBOL_GPL(dev_get_regmap);
/*
 * _regmap_select_page() - for paged (indirect) access, program the
 * range's selector register with the page containing *reg and rewrite
 * *reg to the corresponding data-window address.
 * NOTE(review): error returns and some guards are missing from this
 * sampled excerpt.
 */
1066 static int _regmap_select_page(struct regmap *map, unsigned int *reg,
1067 struct regmap_range_node *range,
1068 unsigned int val_num)
1070 void *orig_work_buf;
1071 unsigned int win_offset;
1072 unsigned int win_page;
/* Decompose the register into page number and offset within the page. */
1076 win_offset = (*reg - range->range_min) % range->window_len;
1077 win_page = (*reg - range->range_min) / range->window_len;
1080 /* Bulk write shouldn't cross range boundary */
1081 if (*reg + val_num - 1 > range->range_max)
1084 /* ... or single page boundary */
1085 if (val_num > range->window_len - win_offset)
1089 /* It is possible to have selector register inside data window.
1090 In that case, selector register is located on every page and
1091 it needs no page switching, when accessed alone. */
1093 range->window_start + win_offset != range->selector_reg) {
1094 /* Use separate work_buf during page switching */
1095 orig_work_buf = map->work_buf;
1096 map->work_buf = map->selector_work_buf;
/* Program the page number into the selector field. */
1098 ret = _regmap_update_bits(map, range->selector_reg,
1099 range->selector_mask,
1100 win_page << range->selector_shift,
1103 map->work_buf = orig_work_buf;
/* Redirect the access into the data window. */
1109 *reg = range->window_start + win_offset;
/*
 * _regmap_raw_write() - core raw block-write path: cache the values,
 * split writes that span page windows, format the register address
 * into work_buf and hand the transfer to the bus (async, single
 * write, gather write, or a linearised fallback copy).
 * NOTE(review): this sampled excerpt is missing many lines (error
 * returns, closing braces, unlock paths); comments describe only the
 * visible flow.
 */
1114 int _regmap_raw_write(struct regmap *map, unsigned int reg,
1115 const void *val, size_t val_len)
1117 struct regmap_range_node *range;
1118 unsigned long flags;
1119 u8 *u8 = map->work_buf;
/* work_val: where a single value sits in work_buf, after reg + pad. */
1120 void *work_val = map->work_buf + map->format.reg_bytes +
1121 map->format.pad_bytes;
1123 int ret = -ENOTSUPP;
1129 /* Check for unwritable registers before we start */
1130 if (map->writeable_reg)
1131 for (i = 0; i < val_len / map->format.val_bytes; i++)
1132 if (!map->writeable_reg(map->dev,
1133 reg + (i * map->reg_stride)))
/* Mirror each value into the register cache before touching hardware. */
1136 if (!map->cache_bypass && map->format.parse_val) {
1138 int val_bytes = map->format.val_bytes;
1139 for (i = 0; i < val_len / val_bytes; i++) {
1140 ival = map->format.parse_val(val + (i * val_bytes));
1141 ret = regcache_write(map, reg + (i * map->reg_stride),
1145 "Error in caching of register: %x ret: %d\n",
/* cache_only mode: mark the cache dirty and skip the bus entirely. */
1150 if (map->cache_only) {
1151 map->cache_dirty = true;
/* Paged access: split writes that run past the current window. */
1156 range = _regmap_range_lookup(map, reg);
1158 int val_num = val_len / map->format.val_bytes;
1159 int win_offset = (reg - range->range_min) % range->window_len;
1160 int win_residue = range->window_len - win_offset;
1162 /* If the write goes beyond the end of the window split it */
1163 while (val_num > win_residue) {
1164 dev_dbg(map->dev, "Writing window %d/%zu\n",
1165 win_residue, val_len / map->format.val_bytes);
/* Recurse for the part that fits in the current window... */
1166 ret = _regmap_raw_write(map, reg, val, win_residue *
1167 map->format.val_bytes);
/* ...then advance past it. */
1172 val_num -= win_residue;
1173 val += win_residue * map->format.val_bytes;
1174 val_len -= win_residue * map->format.val_bytes;
1176 win_offset = (reg - range->range_min) %
1178 win_residue = range->window_len - win_offset;
/* NOTE(review): "®" below is an encoding-corrupted "&reg"
 * (HTML-entity round-trip damage) — restore it when fixing up
 * this file. */
1181 ret = _regmap_select_page(map, ®, range, val_num);
/* Format the (possibly remapped) register address, apply write flag. */
1186 map->format.format_reg(map->work_buf, reg, map->reg_shift);
1188 u8[0] |= map->write_flag_mask;
1191 * Essentially all I/O mechanisms will be faster with a single
1192 * buffer to write. Since register syncs often generate raw
1193 * writes of single registers optimise that case.
1195 if (val != work_val && val_len == map->format.val_bytes) {
1196 memcpy(work_val, val, map->format.val_bytes);
/* Asynchronous path: reuse a pooled descriptor or allocate one. */
1200 if (map->async && map->bus->async_write) {
1201 struct regmap_async *async;
1203 trace_regmap_async_write_start(map->dev, reg, val_len);
1205 spin_lock_irqsave(&map->async_lock, flags);
1206 async = list_first_entry_or_null(&map->async_free,
1207 struct regmap_async,
1210 list_del(&async->list);
1211 spin_unlock_irqrestore(&map->async_lock, flags);
1214 async = map->bus->async_alloc();
/* GFP_DMA: buffer may be handed straight to a DMA-capable bus. */
1218 async->work_buf = kzalloc(map->format.buf_size,
1219 GFP_KERNEL | GFP_DMA);
1220 if (!async->work_buf) {
1228 /* If the caller supplied the value we can use it safely. */
1229 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
1230 map->format.reg_bytes + map->format.val_bytes);
1232 spin_lock_irqsave(&map->async_lock, flags);
1233 list_add_tail(&async->list, &map->async_list);
1234 spin_unlock_irqrestore(&map->async_lock, flags);
/* Two async shapes: header + external payload, or one linear buffer. */
1236 if (val != work_val)
1237 ret = map->bus->async_write(map->bus_context,
1239 map->format.reg_bytes +
1240 map->format.pad_bytes,
1241 val, val_len, async);
1243 ret = map->bus->async_write(map->bus_context,
1245 map->format.reg_bytes +
1246 map->format.pad_bytes +
1247 val_len, NULL, 0, async);
/* Scheduling failed: return the descriptor to the free pool. */
1250 dev_err(map->dev, "Failed to schedule write: %d\n",
1253 spin_lock_irqsave(&map->async_lock, flags);
1254 list_move(&async->list, &map->async_free);
1255 spin_unlock_irqrestore(&map->async_lock, flags);
/* Synchronous path. */
1261 trace_regmap_hw_write_start(map->dev, reg,
1262 val_len / map->format.val_bytes);
1264 /* If we're doing a single register write we can probably just
1265 * send the work_buf directly, otherwise try to do a gather
1268 if (val == work_val)
1269 ret = map->bus->write(map->bus_context, map->work_buf,
1270 map->format.reg_bytes +
1271 map->format.pad_bytes +
1273 else if (map->bus->gather_write)
1274 ret = map->bus->gather_write(map->bus_context, map->work_buf,
1275 map->format.reg_bytes +
1276 map->format.pad_bytes,
1279 /* If that didn't work fall back on linearising by hand. */
1280 if (ret == -ENOTSUPP) {
1281 len = map->format.reg_bytes + map->format.pad_bytes + val_len;
1282 buf = kzalloc(len, GFP_KERNEL);
1286 memcpy(buf, map->work_buf, map->format.reg_bytes);
1287 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
1289 ret = map->bus->write(map->bus_context, buf, len);
1294 trace_regmap_hw_write_done(map->dev, reg,
1295 val_len / map->format.val_bytes);
1301 * regmap_can_raw_write - Test if regmap_raw_write() is supported
1303 * @map: Map to check.
1305 bool regmap_can_raw_write(struct regmap *map)
1307 return map->bus && map->format.format_val && map->format.format_reg;
1309 EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1311 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1315 struct regmap_range_node *range;
1316 struct regmap *map = context;
1318 WARN_ON(!map->bus || !map->format.format_write);
1320 range = _regmap_range_lookup(map, reg);
1322 ret = _regmap_select_page(map, ®, range, 1);
1327 map->format.format_write(map, reg, val);
1329 trace_regmap_hw_write_start(map->dev, reg, 1);
1331 ret = map->bus->write(map->bus_context, map->work_buf,
1332 map->format.buf_size);
1334 trace_regmap_hw_write_done(map->dev, reg, 1);
1339 static int _regmap_bus_raw_write(void *context, unsigned int reg,
1342 struct regmap *map = context;
1344 WARN_ON(!map->bus || !map->format.format_val);
1346 map->format.format_val(map->work_buf + map->format.reg_bytes
1347 + map->format.pad_bytes, val, 0);
1348 return _regmap_raw_write(map, reg,
1350 map->format.reg_bytes +
1351 map->format.pad_bytes,
1352 map->format.val_bytes);
1355 static inline void *_regmap_map_get_context(struct regmap *map)
1357 return (map->bus) ? map : map->bus_context;
1360 int _regmap_write(struct regmap *map, unsigned int reg,
1364 void *context = _regmap_map_get_context(map);
1366 if (!regmap_writeable(map, reg))
1369 if (!map->cache_bypass && !map->defer_caching) {
1370 ret = regcache_write(map, reg, val);
1373 if (map->cache_only) {
1374 map->cache_dirty = true;
1380 if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
1381 dev_info(map->dev, "%x <= %x\n", reg, val);
1384 trace_regmap_reg_write(map->dev, reg, val);
1386 return map->reg_write(context, reg, val);
1390 * regmap_write(): Write a value to a single register
1392 * @map: Register map to write to
1393 * @reg: Register to write to
1394 * @val: Value to be written
1396 * A value of zero will be returned on success, a negative errno will
1397 * be returned in error cases.
1399 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1403 if (reg % map->reg_stride)
1406 map->lock(map->lock_arg);
1408 ret = _regmap_write(map, reg, val);
1410 map->unlock(map->lock_arg);
1414 EXPORT_SYMBOL_GPL(regmap_write);
1417 * regmap_write_async(): Write a value to a single register asynchronously
1419 * @map: Register map to write to
1420 * @reg: Register to write to
1421 * @val: Value to be written
1423 * A value of zero will be returned on success, a negative errno will
1424 * be returned in error cases.
1426 int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
1430 if (reg % map->reg_stride)
1433 map->lock(map->lock_arg);
1437 ret = _regmap_write(map, reg, val);
1441 map->unlock(map->lock_arg);
1445 EXPORT_SYMBOL_GPL(regmap_write_async);
1448 * regmap_raw_write(): Write raw values to one or more registers
1450 * @map: Register map to write to
1451 * @reg: Initial register to write to
1452 * @val: Block of data to be written, laid out for direct transmission to the
1454 * @val_len: Length of data pointed to by val.
1456 * This function is intended to be used for things like firmware
1457 * download where a large block of data needs to be transferred to the
1458 * device. No formatting will be done on the data provided.
1460 * A value of zero will be returned on success, a negative errno will
1461 * be returned in error cases.
1463 int regmap_raw_write(struct regmap *map, unsigned int reg,
1464 const void *val, size_t val_len)
1468 if (!regmap_can_raw_write(map))
1470 if (val_len % map->format.val_bytes)
1473 map->lock(map->lock_arg);
1475 ret = _regmap_raw_write(map, reg, val, val_len);
1477 map->unlock(map->lock_arg);
1481 EXPORT_SYMBOL_GPL(regmap_raw_write);
1484 * regmap_field_write(): Write a value to a single register field
1486 * @field: Register field to write to
1487 * @val: Value to be written
1489 * A value of zero will be returned on success, a negative errno will
1490 * be returned in error cases.
1492 int regmap_field_write(struct regmap_field *field, unsigned int val)
1494 return regmap_update_bits(field->regmap, field->reg,
1495 field->mask, val << field->shift);
1497 EXPORT_SYMBOL_GPL(regmap_field_write);
1500 * regmap_field_update_bits(): Perform a read/modify/write cycle
1501 * on the register field
1503 * @field: Register field to write to
1504 * @mask: Bitmask to change
1505 * @val: Value to be written
1507 * A value of zero will be returned on success, a negative errno will
1508 * be returned in error cases.
1510 int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
1512 mask = (mask << field->shift) & field->mask;
1514 return regmap_update_bits(field->regmap, field->reg,
1515 mask, val << field->shift);
1517 EXPORT_SYMBOL_GPL(regmap_field_update_bits);
1520 * regmap_fields_write(): Write a value to a single register field with port ID
1522 * @field: Register field to write to
1524 * @val: Value to be written
1526 * A value of zero will be returned on success, a negative errno will
1527 * be returned in error cases.
1529 int regmap_fields_write(struct regmap_field *field, unsigned int id,
1532 if (id >= field->id_size)
1535 return regmap_update_bits(field->regmap,
1536 field->reg + (field->id_offset * id),
1537 field->mask, val << field->shift);
1539 EXPORT_SYMBOL_GPL(regmap_fields_write);
1542 * regmap_fields_update_bits(): Perform a read/modify/write cycle
1543 * on the register field
1545 * @field: Register field to write to
1547 * @mask: Bitmask to change
1548 * @val: Value to be written
1550 * A value of zero will be returned on success, a negative errno will
1551 * be returned in error cases.
1553 int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
1554 unsigned int mask, unsigned int val)
1556 if (id >= field->id_size)
1559 mask = (mask << field->shift) & field->mask;
1561 return regmap_update_bits(field->regmap,
1562 field->reg + (field->id_offset * id),
1563 mask, val << field->shift);
1565 EXPORT_SYMBOL_GPL(regmap_fields_update_bits);
1568 * regmap_bulk_write(): Write multiple registers to the device
1570 * @map: Register map to write to
1571 * @reg: First register to be write from
1572 * @val: Block of data to be written, in native register size for device
1573 * @val_count: Number of registers to write
1575 * This function is intended to be used for writing a large block of
1576 * data to the device either in single transfer or multiple transfer.
1578 * A value of zero will be returned on success, a negative errno will
1579 * be returned in error cases.
1581 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
1585 size_t val_bytes = map->format.val_bytes;
1587 if (map->bus && !map->format.parse_inplace)
1589 if (reg % map->reg_stride)
1593 * Some devices don't support bulk write, for
1594 * them we have a series of single write operations.
1596 if (!map->bus || map->use_single_rw) {
1597 map->lock(map->lock_arg);
1598 for (i = 0; i < val_count; i++) {
1601 switch (val_bytes) {
1603 ival = *(u8 *)(val + (i * val_bytes));
1606 ival = *(u16 *)(val + (i * val_bytes));
1609 ival = *(u32 *)(val + (i * val_bytes));
1613 ival = *(u64 *)(val + (i * val_bytes));
1621 ret = _regmap_write(map, reg + (i * map->reg_stride),
1627 map->unlock(map->lock_arg);
1631 wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
1633 dev_err(map->dev, "Error in memory allocation\n");
1636 for (i = 0; i < val_count * val_bytes; i += val_bytes)
1637 map->format.parse_inplace(wval + i);
1639 map->lock(map->lock_arg);
1640 ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
1641 map->unlock(map->lock_arg);
1647 EXPORT_SYMBOL_GPL(regmap_bulk_write);
1650 * _regmap_raw_multi_reg_write()
1652 * the (register,newvalue) pairs in regs have not been formatted, but
1653 * they are all in the same page and have been changed to being page
1654 * relative. The page register has been written if that was neccessary.
1656 static int _regmap_raw_multi_reg_write(struct regmap *map,
1657 const struct reg_default *regs,
1664 size_t val_bytes = map->format.val_bytes;
1665 size_t reg_bytes = map->format.reg_bytes;
1666 size_t pad_bytes = map->format.pad_bytes;
1667 size_t pair_size = reg_bytes + pad_bytes + val_bytes;
1668 size_t len = pair_size * num_regs;
1673 buf = kzalloc(len, GFP_KERNEL);
1677 /* We have to linearise by hand. */
1681 for (i = 0; i < num_regs; i++) {
1682 int reg = regs[i].reg;
1683 int val = regs[i].def;
1684 trace_regmap_hw_write_start(map->dev, reg, 1);
1685 map->format.format_reg(u8, reg, map->reg_shift);
1686 u8 += reg_bytes + pad_bytes;
1687 map->format.format_val(u8, val, 0);
1691 *u8 |= map->write_flag_mask;
1693 ret = map->bus->write(map->bus_context, buf, len);
1697 for (i = 0; i < num_regs; i++) {
1698 int reg = regs[i].reg;
1699 trace_regmap_hw_write_done(map->dev, reg, 1);
1704 static unsigned int _regmap_register_page(struct regmap *map,
1706 struct regmap_range_node *range)
1708 unsigned int win_page = (reg - range->range_min) / range->window_len;
1713 static int _regmap_range_multi_paged_reg_write(struct regmap *map,
1714 struct reg_default *regs,
1719 struct reg_default *base;
1720 unsigned int this_page = 0;
1722 * the set of registers are not neccessarily in order, but
1723 * since the order of write must be preserved this algorithm
1724 * chops the set each time the page changes
1727 for (i = 0, n = 0; i < num_regs; i++, n++) {
1728 unsigned int reg = regs[i].reg;
1729 struct regmap_range_node *range;
1731 range = _regmap_range_lookup(map, reg);
1733 unsigned int win_page = _regmap_register_page(map, reg,
1737 this_page = win_page;
1738 if (win_page != this_page) {
1739 this_page = win_page;
1740 ret = _regmap_raw_multi_reg_write(map, base, n);
1746 ret = _regmap_select_page(map, &base[n].reg, range, 1);
1752 return _regmap_raw_multi_reg_write(map, base, n);
1756 static int _regmap_multi_reg_write(struct regmap *map,
1757 const struct reg_default *regs,
1763 if (!map->can_multi_write) {
1764 for (i = 0; i < num_regs; i++) {
1765 ret = _regmap_write(map, regs[i].reg, regs[i].def);
1772 if (!map->format.parse_inplace)
1775 if (map->writeable_reg)
1776 for (i = 0; i < num_regs; i++) {
1777 int reg = regs[i].reg;
1778 if (!map->writeable_reg(map->dev, reg))
1780 if (reg % map->reg_stride)
1784 if (!map->cache_bypass) {
1785 for (i = 0; i < num_regs; i++) {
1786 unsigned int val = regs[i].def;
1787 unsigned int reg = regs[i].reg;
1788 ret = regcache_write(map, reg, val);
1791 "Error in caching of register: %x ret: %d\n",
1796 if (map->cache_only) {
1797 map->cache_dirty = true;
1804 for (i = 0; i < num_regs; i++) {
1805 unsigned int reg = regs[i].reg;
1806 struct regmap_range_node *range;
1807 range = _regmap_range_lookup(map, reg);
1809 size_t len = sizeof(struct reg_default)*num_regs;
1810 struct reg_default *base = kmemdup(regs, len,
1814 ret = _regmap_range_multi_paged_reg_write(map, base,
1821 return _regmap_raw_multi_reg_write(map, regs, num_regs);
1825 * regmap_multi_reg_write(): Write multiple registers to the device
1827 * where the set of register,value pairs are supplied in any order,
1828 * possibly not all in a single range.
1830 * @map: Register map to write to
1831 * @regs: Array of structures containing register,value to be written
1832 * @num_regs: Number of registers to write
1834 * The 'normal' block write mode will send ultimately send data on the
1835 * target bus as R,V1,V2,V3,..,Vn where successively higer registers are
1836 * addressed. However, this alternative block multi write mode will send
1837 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
1838 * must of course support the mode.
1840 * A value of zero will be returned on success, a negative errno will be
1841 * returned in error cases.
1843 int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
1848 map->lock(map->lock_arg);
1850 ret = _regmap_multi_reg_write(map, regs, num_regs);
1852 map->unlock(map->lock_arg);
1856 EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
1859 * regmap_multi_reg_write_bypassed(): Write multiple registers to the
1860 * device but not the cache
1862 * where the set of register are supplied in any order
1864 * @map: Register map to write to
1865 * @regs: Array of structures containing register,value to be written
1866 * @num_regs: Number of registers to write
1868 * This function is intended to be used for writing a large block of data
1869 * atomically to the device in single transfer for those I2C client devices
1870 * that implement this alternative block write mode.
1872 * A value of zero will be returned on success, a negative errno will
1873 * be returned in error cases.
1875 int regmap_multi_reg_write_bypassed(struct regmap *map,
1876 const struct reg_default *regs,
1882 map->lock(map->lock_arg);
1884 bypass = map->cache_bypass;
1885 map->cache_bypass = true;
1887 ret = _regmap_multi_reg_write(map, regs, num_regs);
1889 map->cache_bypass = bypass;
1891 map->unlock(map->lock_arg);
1895 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
1898 * regmap_raw_write_async(): Write raw values to one or more registers
1901 * @map: Register map to write to
1902 * @reg: Initial register to write to
1903 * @val: Block of data to be written, laid out for direct transmission to the
1904 * device. Must be valid until regmap_async_complete() is called.
1905 * @val_len: Length of data pointed to by val.
1907 * This function is intended to be used for things like firmware
1908 * download where a large block of data needs to be transferred to the
1909 * device. No formatting will be done on the data provided.
1911 * If supported by the underlying bus the write will be scheduled
1912 * asynchronously, helping maximise I/O speed on higher speed buses
1913 * like SPI. regmap_async_complete() can be called to ensure that all
1914 * asynchrnous writes have been completed.
1916 * A value of zero will be returned on success, a negative errno will
1917 * be returned in error cases.
1919 int regmap_raw_write_async(struct regmap *map, unsigned int reg,
1920 const void *val, size_t val_len)
1924 if (val_len % map->format.val_bytes)
1926 if (reg % map->reg_stride)
1929 map->lock(map->lock_arg);
1933 ret = _regmap_raw_write(map, reg, val, val_len);
1937 map->unlock(map->lock_arg);
1941 EXPORT_SYMBOL_GPL(regmap_raw_write_async);
1943 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1944 unsigned int val_len)
1946 struct regmap_range_node *range;
1947 u8 *u8 = map->work_buf;
1952 range = _regmap_range_lookup(map, reg);
1954 ret = _regmap_select_page(map, ®, range,
1955 val_len / map->format.val_bytes);
1960 map->format.format_reg(map->work_buf, reg, map->reg_shift);
1963 * Some buses or devices flag reads by setting the high bits in the
1964 * register addresss; since it's always the high bits for all
1965 * current formats we can do this here rather than in
1966 * formatting. This may break if we get interesting formats.
1968 u8[0] |= map->read_flag_mask;
1970 trace_regmap_hw_read_start(map->dev, reg,
1971 val_len / map->format.val_bytes);
1973 ret = map->bus->read(map->bus_context, map->work_buf,
1974 map->format.reg_bytes + map->format.pad_bytes,
1977 trace_regmap_hw_read_done(map->dev, reg,
1978 val_len / map->format.val_bytes);
1983 static int _regmap_bus_read(void *context, unsigned int reg,
1987 struct regmap *map = context;
1989 if (!map->format.parse_val)
1992 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
1994 *val = map->format.parse_val(map->work_buf);
1999 static int _regmap_read(struct regmap *map, unsigned int reg,
2003 void *context = _regmap_map_get_context(map);
2005 WARN_ON(!map->reg_read);
2007 if (!map->cache_bypass) {
2008 ret = regcache_read(map, reg, val);
2013 if (map->cache_only)
2016 if (!regmap_readable(map, reg))
2019 ret = map->reg_read(context, reg, val);
2022 if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
2023 dev_info(map->dev, "%x => %x\n", reg, *val);
2026 trace_regmap_reg_read(map->dev, reg, *val);
2028 if (!map->cache_bypass)
2029 regcache_write(map, reg, *val);
2036 * regmap_read(): Read a value from a single register
2038 * @map: Register map to read from
2039 * @reg: Register to be read from
2040 * @val: Pointer to store read value
2042 * A value of zero will be returned on success, a negative errno will
2043 * be returned in error cases.
2045 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2049 if (reg % map->reg_stride)
2052 map->lock(map->lock_arg);
2054 ret = _regmap_read(map, reg, val);
2056 map->unlock(map->lock_arg);
2060 EXPORT_SYMBOL_GPL(regmap_read);
2063 * regmap_raw_read(): Read raw data from the device
2065 * @map: Register map to read from
2066 * @reg: First register to be read from
2067 * @val: Pointer to store read value
2068 * @val_len: Size of data to read
2070 * A value of zero will be returned on success, a negative errno will
2071 * be returned in error cases.
2073 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2076 size_t val_bytes = map->format.val_bytes;
2077 size_t val_count = val_len / val_bytes;
2083 if (val_len % map->format.val_bytes)
2085 if (reg % map->reg_stride)
2088 map->lock(map->lock_arg);
2090 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
2091 map->cache_type == REGCACHE_NONE) {
2092 /* Physical block read if there's no cache involved */
2093 ret = _regmap_raw_read(map, reg, val, val_len);
2096 /* Otherwise go word by word for the cache; should be low
2097 * cost as we expect to hit the cache.
2099 for (i = 0; i < val_count; i++) {
2100 ret = _regmap_read(map, reg + (i * map->reg_stride),
2105 map->format.format_val(val + (i * val_bytes), v, 0);
2110 map->unlock(map->lock_arg);
2114 EXPORT_SYMBOL_GPL(regmap_raw_read);
2117 * regmap_field_read(): Read a value to a single register field
2119 * @field: Register field to read from
2120 * @val: Pointer to store read value
2122 * A value of zero will be returned on success, a negative errno will
2123 * be returned in error cases.
2125 int regmap_field_read(struct regmap_field *field, unsigned int *val)
2128 unsigned int reg_val;
2129 ret = regmap_read(field->regmap, field->reg, ®_val);
2133 reg_val &= field->mask;
2134 reg_val >>= field->shift;
2139 EXPORT_SYMBOL_GPL(regmap_field_read);
2142 * regmap_fields_read(): Read a value to a single register field with port ID
2144 * @field: Register field to read from
2146 * @val: Pointer to store read value
2148 * A value of zero will be returned on success, a negative errno will
2149 * be returned in error cases.
2151 int regmap_fields_read(struct regmap_field *field, unsigned int id,
2155 unsigned int reg_val;
2157 if (id >= field->id_size)
2160 ret = regmap_read(field->regmap,
2161 field->reg + (field->id_offset * id),
2166 reg_val &= field->mask;
2167 reg_val >>= field->shift;
2172 EXPORT_SYMBOL_GPL(regmap_fields_read);
2175 * regmap_bulk_read(): Read multiple registers from the device
2177 * @map: Register map to read from
2178 * @reg: First register to be read from
2179 * @val: Pointer to store read value, in native register size for device
2180 * @val_count: Number of registers to read
2182 * A value of zero will be returned on success, a negative errno will
2183 * be returned in error cases.
2185 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
2189 size_t val_bytes = map->format.val_bytes;
2190 bool vol = regmap_volatile_range(map, reg, val_count);
2192 if (reg % map->reg_stride)
2195 if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
2197 * Some devices does not support bulk read, for
2198 * them we have a series of single read operations.
2200 if (map->use_single_rw) {
2201 for (i = 0; i < val_count; i++) {
2202 ret = regmap_raw_read(map,
2203 reg + (i * map->reg_stride),
2204 val + (i * val_bytes),
2210 ret = regmap_raw_read(map, reg, val,
2211 val_bytes * val_count);
2216 for (i = 0; i < val_count * val_bytes; i += val_bytes)
2217 map->format.parse_inplace(val + i);
2219 for (i = 0; i < val_count; i++) {
2221 ret = regmap_read(map, reg + (i * map->reg_stride),
2225 memcpy(val + (i * val_bytes), &ival, val_bytes);
2231 EXPORT_SYMBOL_GPL(regmap_bulk_read);
2233 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
2234 unsigned int mask, unsigned int val,
2238 unsigned int tmp, orig;
2240 ret = _regmap_read(map, reg, &orig);
2248 ret = _regmap_write(map, reg, tmp);
2260 * regmap_update_bits: Perform a read/modify/write cycle on the register map
2262 * @map: Register map to update
2263 * @reg: Register to update
2264 * @mask: Bitmask to change
2265 * @val: New value for bitmask
2267 * Returns zero for success, a negative number on error.
2269 int regmap_update_bits(struct regmap *map, unsigned int reg,
2270 unsigned int mask, unsigned int val)
2274 map->lock(map->lock_arg);
2275 ret = _regmap_update_bits(map, reg, mask, val, NULL);
2276 map->unlock(map->lock_arg);
2280 EXPORT_SYMBOL_GPL(regmap_update_bits);
2283 * regmap_update_bits_async: Perform a read/modify/write cycle on the register
2284 * map asynchronously
2286 * @map: Register map to update
2287 * @reg: Register to update
2288 * @mask: Bitmask to change
2289 * @val: New value for bitmask
2291 * With most buses the read must be done synchronously so this is most
2292 * useful for devices with a cache which do not need to interact with
2293 * the hardware to determine the current register value.
2295 * Returns zero for success, a negative number on error.
2297 int regmap_update_bits_async(struct regmap *map, unsigned int reg,
2298 unsigned int mask, unsigned int val)
2302 map->lock(map->lock_arg);
2306 ret = _regmap_update_bits(map, reg, mask, val, NULL);
2310 map->unlock(map->lock_arg);
2314 EXPORT_SYMBOL_GPL(regmap_update_bits_async);
2317 * regmap_update_bits_check: Perform a read/modify/write cycle on the
2318 * register map and report if updated
2320 * @map: Register map to update
2321 * @reg: Register to update
2322 * @mask: Bitmask to change
2323 * @val: New value for bitmask
2324 * @change: Boolean indicating if a write was done
2326 * Returns zero for success, a negative number on error.
2328 int regmap_update_bits_check(struct regmap *map, unsigned int reg,
2329 unsigned int mask, unsigned int val,
2334 map->lock(map->lock_arg);
2335 ret = _regmap_update_bits(map, reg, mask, val, change);
2336 map->unlock(map->lock_arg);
2339 EXPORT_SYMBOL_GPL(regmap_update_bits_check);
2342 * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
2343 * register map asynchronously and report if
2346 * @map: Register map to update
2347 * @reg: Register to update
2348 * @mask: Bitmask to change
2349 * @val: New value for bitmask
2350 * @change: Boolean indicating if a write was done
2352 * With most buses the read must be done synchronously so this is most
2353 * useful for devices with a cache which do not need to interact with
2354 * the hardware to determine the current register value.
2356 * Returns zero for success, a negative number on error.
2358 int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
2359 unsigned int mask, unsigned int val,
2364 map->lock(map->lock_arg);
2368 ret = _regmap_update_bits(map, reg, mask, val, change);
2372 map->unlock(map->lock_arg);
2376 EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);
2378 void regmap_async_complete_cb(struct regmap_async *async, int ret)
2380 struct regmap *map = async->map;
2383 trace_regmap_async_io_complete(map->dev);
2385 spin_lock(&map->async_lock);
2386 list_move(&async->list, &map->async_free);
2387 wake = list_empty(&map->async_list);
2390 map->async_ret = ret;
2392 spin_unlock(&map->async_lock);
2395 wake_up(&map->async_waitq);
2397 EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
2399 static int regmap_async_is_done(struct regmap *map)
2401 unsigned long flags;
2404 spin_lock_irqsave(&map->async_lock, flags);
2405 ret = list_empty(&map->async_list);
2406 spin_unlock_irqrestore(&map->async_lock, flags);
2412 * regmap_async_complete: Ensure all asynchronous I/O has completed.
2414 * @map: Map to operate on.
2416 * Blocks until any pending asynchronous I/O has completed. Returns
2417 * an error code for any failed I/O operations.
2419 int regmap_async_complete(struct regmap *map)
2421 unsigned long flags;
2424 /* Nothing to do with no async support */
2425 if (!map->bus || !map->bus->async_write)
2428 trace_regmap_async_complete_start(map->dev);
2430 wait_event(map->async_waitq, regmap_async_is_done(map));
2432 spin_lock_irqsave(&map->async_lock, flags);
2433 ret = map->async_ret;
2435 spin_unlock_irqrestore(&map->async_lock, flags);
2437 trace_regmap_async_complete_done(map->dev);
2441 EXPORT_SYMBOL_GPL(regmap_async_complete);
2444 * regmap_register_patch: Register and apply register updates to be applied
2445 * on device initialistion
2447 * @map: Register map to apply updates to.
2448 * @regs: Values to update.
2449 * @num_regs: Number of entries in regs.
2451 * Register a set of register updates to be applied to the device
2452 * whenever the device registers are synchronised with the cache and
2453 * apply them immediately. Typically this is used to apply
2454 * corrections to be applied to the device defaults on startup, such
2455 * as the updates some vendors provide to undocumented registers.
2457 * The caller must ensure that this function cannot be called
2458 * concurrently with either itself or regcache_sync().
2460 int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
2463 struct reg_default *p;
2467 if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
2471 p = krealloc(map->patch,
2472 sizeof(struct reg_default) * (map->patch_regs + num_regs),
2475 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
2477 map->patch_regs += num_regs;
2482 map->lock(map->lock_arg);
2484 bypass = map->cache_bypass;
2486 map->cache_bypass = true;
2489 ret = _regmap_multi_reg_write(map, regs, num_regs);
2495 map->cache_bypass = bypass;
2497 map->unlock(map->lock_arg);
2499 regmap_async_complete(map);
2503 EXPORT_SYMBOL_GPL(regmap_register_patch);
2506 * regmap_get_val_bytes(): Report the size of a register value
2508 * Report the size of a register value, mainly intended to for use by
2509 * generic infrastructure built on top of regmap.
2511 int regmap_get_val_bytes(struct regmap *map)
2513 if (map->format.format_write)
2516 return map->format.val_bytes;
2518 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
2520 int regmap_parse_val(struct regmap *map, const void *buf,
2523 if (!map->format.parse_val)
2526 *val = map->format.parse_val(buf);
2530 EXPORT_SYMBOL_GPL(regmap_parse_val);
2532 static int __init regmap_initcall(void)
2534 regmap_debugfs_initcall();
2538 postcore_initcall(regmap_initcall);