Copied from: https://blog.csdn.net/qq_16777851/article/details/82975057
Regarding resources, Linux has the following definitions:
/*
* IO resources have these defined flags.
*/
#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */
#define IORESOURCE_TYPE_BITS 0x00001f00 /* Resource type */
#define IORESOURCE_IO 0x00000100 /* PCI/ISA I/O ports */
#define IORESOURCE_MEM 0x00000200
#define IORESOURCE_REG 0x00000300 /* Register offsets */
#define IORESOURCE_IRQ 0x00000400
#define IORESOURCE_DMA 0x00000800
#define IORESOURCE_BUS 0x00001000
#define IORESOURCE_PREFETCH 0x00002000 /* No side effects */
#define IORESOURCE_READONLY 0x00004000
#define IORESOURCE_CACHEABLE 0x00008000
#define IORESOURCE_RANGELENGTH 0x00010000
#define IORESOURCE_SHADOWABLE 0x00020000
#define IORESOURCE_SIZEALIGN 0x00040000 /* size indicates alignment */
#define IORESOURCE_STARTALIGN 0x00080000 /* start field is alignment */
#define IORESOURCE_MEM_64 0x00100000
#define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */
#define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
#define IORESOURCE_DISABLED 0x10000000
#define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */
#define IORESOURCE_AUTO 0x40000000
#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
The most commonly used are these:
#define IORESOURCE_IO 0x00000100 /* PCI/ISA I/O ports */
#define IORESOURCE_MEM 0x00000200
#define IORESOURCE_REG 0x00000300 /* Register offsets */
#define IORESOURCE_IRQ 0x00000400
#define IORESOURCE_DMA 0x00000800
#define IORESOURCE_BUS 0x00001000
Today we will mainly analyze the first three kinds of resources: IO, MEM and REG.
Almost every peripheral is driven by reading and writing the registers on the device, and a peripheral's registers are usually addressed contiguously. Depending on the CPU architecture, there are two ways of addressing I/O ports:
(1) I/O-mapped (port I/O)
Typically, an x86 processor implements a separate address space for peripherals, called the "I/O address space" or "I/O port space". The CPU accesses locations in this space through dedicated I/O instructions (such as the x86 IN and OUT instructions). From the kernel-programming point of view this space can only be reached through dedicated accessor functions; at the hardware level the CPU needs special instructions or a special access mode and cannot simply dereference a pointer into it. On embedded systems there is essentially no I/O address space.
(2) Memory-mapped
CPUs with RISC instruction sets (MIPS, ARM, PowerPC, and so on) usually implement only a single physical address space. In that case the physical addresses of a peripheral's I/O ports are mapped into the memory address space and the I/O ports become part of memory. The CPU can then access a peripheral's I/O ports just like memory cells, without needing dedicated I/O instructions.
If you search the kernel source you will find that only a handful of registers are described with IORESOURCE_REG; the vast majority are described with IORESOURCE_IO or IORESOURCE_MEM.
On our ARM platforms registers are accessed exactly like memory, so register resources on ARM are usually declared with IORESOURCE_MEM.
In the kernel a resource is described by the structure below; the structure also serves as a tree node, so a resource can be linked into a resource tree.
/*
* Resources are tree-like, allowing
* nesting etc..
*/
struct resource {
resource_size_t start; /* start of the range */
resource_size_t end; /* end of the range */
const char *name; /* resource name, handy when inspecting /proc */
unsigned long flags; /* which kind of resource this is */
struct resource *parent, *sibling, *child; /* tree links */
};
For example, to pass a register range as a resource to a driver:
static struct resource xxxx = {
/* addr */
.start = 0x04014000,
.end = 0x04014003,
.flags = IORESOURCE_MEM,
};
Generally the physical addresses of a peripheral's I/O resources are fixed by the hardware and known in advance — the datasheet tells you what they are. The CPU, however, has usually not assigned virtual addresses to these physical addresses, so a driver cannot access the I/O region through its physical address directly. The region must first be mapped into the kernel's virtual address space (through the page tables); only then can the resulting kernel virtual addresses be used by ordinary load/store instructions to reach the I/O memory. Linux declares ioremap() in io.h for mapping the physical address of an I/O memory resource into the kernel's virtual address space.
As a rule, I/O memory must be requested before it is mapped, and I/O ports must be requested as well. Requesting an I/O port means telling the kernel that you are going to access it, so that the kernel will not let anyone else claim the same port; two users poking the same piece of hardware at the same time would cause trouble.
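Before we dig into the platform code, here is a minimal sketch of that request-then-map pattern (not from the original post; it reuses the hypothetical register range 0x04014000 from the example above):

#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/errno.h>

#define XXXX_PHYS 0x04014000
#define XXXX_SIZE 0x4

static void __iomem *xxxx_base;

static int xxxx_setup(void)
{
    /* tell the kernel we own this range; NULL means someone else already claimed it */
    if (!request_mem_region(XXXX_PHYS, XXXX_SIZE, "xxxx"))
        return -EBUSY;

    /* map the physical registers into the kernel's virtual address space */
    xxxx_base = ioremap(XXXX_PHYS, XXXX_SIZE);
    if (!xxxx_base) {
        release_mem_region(XXXX_PHYS, XXXX_SIZE);
        return -ENOMEM;
    }

    writel(0x1, xxxx_base); /* the registers can now be accessed like memory */
    return 0;
}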
Let's take the platform bus adding a MEM resource as an example and analyze how a resource is inserted into the resource tree.
/**
* platform_device_register - add a platform-level device
* @pdev: platform device we're adding
*/
int platform_device_register(struct platform_device *pdev)
{
device_initialize(&pdev->dev);
arch_setup_pdev_archdata(pdev);
return platform_device_add(pdev);
}
/**
* device_initialize - init device structure.
* @dev: device.
*
* This prepares the device for use by other layers by initializing
* its fields.
* It is the first half of device_register(), if called by
* that function, though it can also be called separately, so one
* may use @dev's fields. In particular, get_device()/put_device()
* may be used for reference counting of @dev after calling this
* function.
*
* All fields in @dev must be initialized by the caller to 0, except
* for those explicitly set to some other value. The simplest
* approach is to use kzalloc() to allocate the structure containing
* @dev.
*
* NOTE: Use put_device() to give up your reference instead of freeing
* @dev directly once you have called this function.
*/
void device_initialize(struct device *dev)
{
/* initialize the generic fields of the device */
dev->kobj.kset = devices_kset;
kobject_init(&dev->kobj, &device_ktype);
INIT_LIST_HEAD(&dev->dma_pools);
mutex_init(&dev->mutex);
lockdep_set_novalidate_class(&dev->mutex);
spin_lock_init(&dev->devres_lock);
INIT_LIST_HEAD(&dev->devres_head);
device_pm_init(dev);
set_dev_node(dev, -1);
}
/**
* arch_setup_pdev_archdata - Allow manipulation of archdata before its used
* @pdev: platform device
*
* This is called before platform_device_add() such that any pdev_archdata may
* be setup before the platform_notifier is called. So if a user needs to
* manipulate any relevant information in the pdev_archdata they can do:
*
* platform_device_alloc()
* ... manipulate ...
* platform_device_add()
*
* And if they don't care they can just call platform_device_register() and
* everything will just work out.
*/
void __weak arch_setup_pdev_archdata(struct platform_device *pdev)
{
/* empty weak stub, reserved for architectures that need it */
}
/**
* platform_device_add - add a platform device to device hierarchy
* @pdev: platform device we're adding
*
* This is part 2 of platform_device_register(), though may be called
* separately _iff_ pdev was allocated by platform_device_alloc().
*/
int platform_device_add(struct platform_device *pdev)
{
int i, ret;
if (!pdev)
return -EINVAL;
if (!pdev->dev.parent)
pdev->dev.parent = &platform_bus; /* default parent is the platform bus device */
pdev->dev.bus = &platform_bus_type; /* the device is attached to the platform bus type */
switch (pdev->id) {
default: /* the caller provided an explicit device id */
dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
break;
case PLATFORM_DEVID_NONE: /* -1: no device id suffix wanted */
dev_set_name(&pdev->dev, "%s", pdev->name);
break;
case PLATFORM_DEVID_AUTO: /* let the bus allocate the device id */
/*
* Automatically allocated device ID. We mark it as such so
* that we remember it must be freed, and we append a suffix
* to avoid namespace collision with explicit IDs.
*/
ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
if (ret < 0)
goto err_out;
pdev->id = ret;
pdev->id_auto = true;
dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
break;
}
/* insert each of the device's resources into a resource tree; this fails if the range is already claimed */
for (i = 0; i < pdev->num_resources; i++) {
struct resource *p, *r = &pdev->resource[i];
if (r->name == NULL)
r->name = dev_name(&pdev->dev); /* unnamed resources take the device name */
p = r->parent;
if (!p) { /* no parent given: for IORESOURCE_MEM or IORESOURCE_IO resources, pick the corresponding resource tree */
if (resource_type(r) == IORESOURCE_MEM)
p = &iomem_resource; /* root of the iomem resource tree */
else if (resource_type(r) == IORESOURCE_IO)
p = &ioport_resource; /* root of the ioport resource tree */
}
/* p must be set here; without an explicit parent, only IO and MEM resources are added to a tree */
if (p && insert_resource(p, r)) {
dev_err(&pdev->dev, "failed to claim resource %d
", i);
ret = -EBUSY;
goto failed;
}
}
pr_debug("Registering platform device '%s'. Parent at %s
",
dev_name(&pdev->dev), dev_name(pdev->dev.parent));
/* add the device: sysfs nodes, uevents, driver matching, and so on */
ret = device_add(&pdev->dev);
if (ret == 0)
return ret;
failed:
if (pdev->id_auto) {
ida_simple_remove(&platform_devid_ida, pdev->id);
pdev->id = PLATFORM_DEVID_AUTO;
}
while (--i >= 0) {
struct resource *r = &pdev->resource[i];
if (r->parent)
release_resource(r);
}
err_out:
return ret;
}
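Putting the pieces together, a board file could register a platform device carrying the MEM resource from the earlier example roughly like this (a sketch; the device name "xxxx" and the register range are the hypothetical values used above):

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/kernel.h>

static struct resource xxxx_resources[] = {
    {
        .name  = "xxxx-regs",
        .start = 0x04014000,
        .end   = 0x04014003,
        .flags = IORESOURCE_MEM,
    },
};

static struct platform_device xxxx_device = {
    .name          = "xxxx",
    .id            = -1, /* PLATFORM_DEVID_NONE: no ".N" suffix in the device name */
    .resource      = xxxx_resources,
    .num_resources = ARRAY_SIZE(xxxx_resources),
};

static int __init xxxx_board_init(void)
{
    /* platform_device_add() will insert the MEM resource into iomem_resource */
    return platform_device_register(&xxxx_device);
}
device_initcall(xxxx_board_init);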
Let's first look at the root nodes of the two resource trees (kernel/resource.c); after that we will analyze how resources are added to a tree.
struct resource ioport_resource = {
.name = "PCI IO",
.start = 0,
.end = IO_SPACE_LIMIT,
.flags = IORESOURCE_IO,
};
struct resource iomem_resource = {
.name = "PCI mem",
.start = 0,
.end = -1,
.flags = IORESOURCE_MEM,
};
On a 32-bit processor the iomem resource, i.e. the "PCI mem" root, covers 0x0 to 0xffffffff (end = -1 wraps to the top of resource_size_t).
The ioport limit, on the other hand, is a macro; searching for it turns up:
/*
* This is the limit of PC card/PCI/ISA IO space, which is by default
* 64K if we have PC card, PCI or ISA support. Otherwise, default to
* zero to prevent ISA/PCI drivers claiming IO space (and potentially
* oopsing.)
*
* Only set this larger if you really need inb() et.al. to operate over
* a larger address space. Note that SOC_COMMON ioremaps each sockets
* IO space area, and so inb() et.al. must be defined to operate as per
* readb() et.al. on such platforms.
*/
#ifndef IO_SPACE_LIMIT
#if defined(CONFIG_PCMCIA_SOC_COMMON) || defined(CONFIG_PCMCIA_SOC_COMMON_MODULE)
#define IO_SPACE_LIMIT ((resource_size_t)0xffffffff)
#elif defined(CONFIG_PCI) || defined(CONFIG_ISA) || defined(CONFIG_PCCARD)
#define IO_SPACE_LIMIT ((resource_size_t)0xffff)
#else
#define IO_SPACE_LIMIT ((resource_size_t)0)
#endif
#endif
The limit is non-zero only when PC-card, PCI or ISA support is configured; otherwise it is 0.
In other words, without the PCI-related options you cannot describe resources with IORESOURCE_IO; inserting them into the resource tree would simply fail.
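As a quick illustration (a hypothetical resource, not from the original post): when IO_SPACE_LIMIT is 0, ioport_resource covers only [0, 0], so a port-range resource like the one below cannot be inserted — __request_resource() sees end > root->end and returns the root, which insert_resource() turns into -EBUSY:

#include <linux/ioport.h>
#include <linux/init.h>

/* hypothetical legacy port range, just for illustration */
static struct resource demo_io_res = {
    .name  = "demo-io",
    .start = 0x300,
    .end   = 0x3ff,
    .flags = IORESOURCE_IO,
};

static int __init demo_io_init(void)
{
    /* with IO_SPACE_LIMIT == 0 this returns -EBUSY; with PCI/ISA configured it can succeed */
    return insert_resource(&ioport_resource, &demo_io_res);
}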
Now let's look at how the platform bus inserts a MEM region into the resource tree. (Note up front that the kernel organizes resources as a multi-way tree.)
/**
* insert_resource - Inserts a resource in the resource tree
* @parent: parent of the new resource
* @new: new resource to insert
*
* Returns 0 on success, -EBUSY if the resource can't be inserted.
*/
int insert_resource(struct resource *parent, struct resource *new)
{
struct resource *conflict;
conflict = insert_resource_conflict(parent, new);
return conflict ? -EBUSY : 0;
}
/**
* insert_resource_conflict - Inserts resource in the resource tree
* @parent: parent of the new resource
* @new: new resource to insert
*
* Returns 0 on success, conflict resource if the resource can't be inserted.
*
* This function is equivalent to request_resource_conflict when no conflict
* happens. If a conflict happens, and the conflicting resources
* entirely fit within the range of the new resource, then the new
* resource is inserted and the conflicting resources become children of
* the new resource.
*/
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
struct resource *conflict;
write_lock(&resource_lock);
conflict = __insert_resource(parent, new);
write_unlock(&resource_lock);
return conflict;
}
/*
* Insert a resource into the resource tree. If successful, return NULL,
* otherwise return the conflicting resource (compare to __request_resource())
*/
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
struct resource *first, *next;
for (;; parent = first) {
first = __request_resource(parent, new);
if (!first)
return first;
if (first == parent)
return first;
if (WARN_ON(first == new)) /* duplicated insertion */
return first;
if ((first->start > new->start) || (first->end < new->end))
break;
if ((first->start == new->start) && (first->end == new->end))
break;
}
for (next = first; ; next = next->sibling) {
/* Partial overlap? Bad, and unfixable */
if (next->start < new->start || next->end > new->end)
return next;
if (!next->sibling)
break;
if (next->sibling->start > new->end)
break;
}
new->parent = parent;
new->sibling = next->sibling;
new->child = first;
next->sibling = NULL;
for (next = first; next; next = next->sibling)
next->parent = new;
if (parent->child == first) {
parent->child = new;
} else {
next = parent->child;
while (next->sibling != first)
next = next->sibling;
next->sibling = new;
}
return NULL;
}
/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
resource_size_t start = new->start;
resource_size_t end = new->end;
struct resource *tmp, **p;
if (end < start) /* the requested range itself is invalid */
return root;
if (start < root->start) /* the range falls outside the root resource */
return root;
if (end > root->end) /* the range falls outside the root resource */
return root;
p = &root->child;
for (;;) {
tmp = *p;
if (!tmp || tmp->start > end) {
new->sibling = tmp;
*p = new;
new->parent = root;
return NULL;
}
p = &tmp->sibling;
if (tmp->end < start)
continue;
return tmp;
}
}
Let's start by working through __insert_resource (and the __request_resource it calls) with some examples.
Assume we are requesting from iomem_resource, whose overall range is 0x0 to 0xffffffff, and that nothing has been claimed yet.
Suppose the first request is 0xa0000000-0xafffffff; call this region A.
After the three sanity checks pass, p = &root->child. Nothing has been requested under root yet, so child is NULL and tmp = *p = NULL. Because !tmp is true, we enter the first if inside the for loop: new->sibling = tmp = NULL, *p = new (root->child now points to new), and new->parent is set to root.
This produces the binding shown in the figure below.
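Since the original figure is not reproduced here, the tree at this point can be sketched textually as:

iomem_resource (root: 0x00000000-0xffffffff)
  └── child: A (0xa0000000-0xafffffff)   A->parent = root, A->sibling = NULL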
Starting from this picture, consider four cases of requesting another region:
1. The requested end is below A's start (0x50000000-0x5fffffff).
2. The requested start is above A's end (0xc0000000-0xcfffffff).
3. The requested start is below A's start but its end is above A's start, i.e. the requested end falls inside A (0x9fff0000-0xa000ffff).
4. The requested start is above A's start but its end is above A's end, i.e. the requested start falls inside A (0xafff0000-0xb000ffff).
First case (0x50000000-0x5fffffff):
Because the requested end is below A's start, tmp->start > end holds, we enter the first if inside the for loop, and the binding shown in the figure below is made (the new region becomes root's first child, with A as its sibling).
Second case (0xc0000000-0xcfffffff):
The first if inside the loop is not satisfied on the first pass, so the following lines run:
for (;;) {
tmp = *p;
if (!tmp || tmp->start > end) {
new->sibling = tmp;
*p = new;
new->parent = root;
return NULL;
}
p = &tmp->sibling;
if (tmp->end < start)
continue;
return tmp;
}
Here tmp points to A, so p = &tmp->sibling points at A->sibling, which is NULL. And because A->end = 0xafffffff is below 0xc0000000, we hit continue and go back to the top of the loop.
This time p points to A->sibling, whose value is NULL, so tmp is NULL, the if is taken, and the binding shown in the figure below is made (the new region B2 becomes A's sibling).
Third case (0x9fff0000-0xa000ffff):
Again the first if is not satisfied on the first pass, so the following lines run:
for (;;) {
tmp = *p;
if (!tmp || tmp->start > end) {
new->sibling = tmp;
*p = new;
new->parent = root;
return NULL;
}
p = &tmp->sibling;
if (tmp->end < start)
continue;
return tmp;
}
Here tmp points to A. Since A->end (0xafffffff) < start (0x9fff0000) is false, we fall through and return tmp, i.e. A; the tree under root is left unchanged.
Fourth case (0xafff0000-0xb000ffff):
Similarly, A->end (0xafffffff) < start (0xafff0000) is false, so A itself is returned.
Let's analyze one more case: starting from case 2 above (root with children A and B2), request 0xa0000000-0xbfffffff.
1. On entering the loop, tmp->start (0xa0000000) > end (0xbfffffff) is false, so the statements below the if run.
2. Then tmp->end (0xafffffff) < start (0xa0000000) is also false, so we exit and return tmp — which is A, the first region that overlaps the request — rather than inserting.
Summary
From the four cases above we can see that as long as the region being inserted does not overlap anything already hanging off the node, it can be inserted.
(Note that on failure the conflicting node is returned, not just an error code — this matters below.)
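To make the four cases concrete, here is a hedged test-module sketch (not in the original post) that inserts region A and then the four ranges above with insert_resource(); cases 1 and 2 should print 0 while cases 3 and 4 should print -EBUSY. The addresses are only illustrative and must not clash with real resources on your board; a real module would also remove whatever it inserted on exit.

#include <linux/module.h>
#include <linux/ioport.h>

static struct resource res_a = {
    .name = "A", .start = 0xa0000000, .end = 0xafffffff, .flags = IORESOURCE_MEM,
};

static struct resource res_case[] = {
    { .name = "case1", .start = 0x50000000, .end = 0x5fffffff, .flags = IORESOURCE_MEM },
    { .name = "case2", .start = 0xc0000000, .end = 0xcfffffff, .flags = IORESOURCE_MEM },
    { .name = "case3", .start = 0x9fff0000, .end = 0xa000ffff, .flags = IORESOURCE_MEM },
    { .name = "case4", .start = 0xafff0000, .end = 0xb000ffff, .flags = IORESOURCE_MEM },
};

static int __init res_demo_init(void)
{
    int i;

    pr_info("A: %d\n", insert_resource(&iomem_resource, &res_a));
    for (i = 0; i < ARRAY_SIZE(res_case); i++) /* cases 1/2 return 0, cases 3/4 return -EBUSY */
        pr_info("%s: %d\n", res_case[i].name,
            insert_resource(&iomem_resource, &res_case[i]));
    return 0;
}
module_init(res_demo_init);
MODULE_LICENSE("GPL");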
Let's do a small experiment and inspect the resources with the following command:
cat /proc/iomem
[root@linux]/# cat /proc/iomem
30000000-42ffffff : System RAM
  30008000-3039ad13 : Kernel code
  303be000-3042e7bb : Kernel data
43800000-4fffffff : System RAM
88000000-88000000 : dm9000
  88000000-88000000 : dm9000
88000004-88000007 : dm9000
  88000004-88000007 : dm9000
e0900000-e0900fff : dma-pl330.0
  e0900000-e0900fff : dma-pl330.0
e0a00000-e0a00fff : dma-pl330.1
  e0a00000-e0a00fff : dma-pl330.1
e1100000-e11000ff : samsung-spdif
e1600000-e160001f : s5pv210-keypad
e1700000-e17000ff : s3c64xx-ts
  e1700000-e17000ff : samsung-adc-v3
e1800000-e1800fff : s3c2440-i2c.0
e1a00000-e1a00fff : s3c2440-i2c.2
e2200000-e22000ff : samsung-ac97
e2500000-e2500fff : samsung-pwm
e2700000-e27003ff : s3c2410-wdt
e2800000-e28000ff : s3c64xx-rtc
e2900000-e29000ff : s5pv210-uart.0
  e2900000-e29000ff : s5pv210-uart
e2900400-e29004ff : s5pv210-uart.1
  e2900400-e29004ff : s5pv210-uart
e2900800-e29008ff : s5pv210-uart.2
  e2900800-e29008ff : s5pv210-uart
e2900c00-e2900cff : s5pv210-uart.3
  e2900c00-e2900cff : s5pv210-uart
e8200000-e8203fff : s5pv210-pata.0
eb000000-eb000fff : s3c-sdhci.0
eb100000-eb100fff : s3c-sdhci.1
eb200000-eb200fff : s3c-sdhci.2
eb300000-eb300fff : s3c-sdhci.3
ec000000-ec01ffff : s3c-hsotg
eee30000-eee300ff : samsung-i2s.0
f1700000-f170ffff : s5p-mfc
f8000000-f8003fff : s5pv210-fb
fab00000-fab00fff : s3c2440-i2c.1
fb200000-fb200fff : s5pv210-fimc.0
fb300000-fb300fff : s5pv210-fimc.1
fb400000-fb400fff : s5pv210-fimc.2
fb600000-fb600fff : s5p-jpeg.0
[root@linux]/#
Note the indentation above: an indented entry is a child of the entry above it.
Pay particular attention to the part circled in red in the original screenshot: the s5pv210-uart entries at e2900000.
Now we insert the resource shown in the figure below (a "myled" region covering the UART block).
The result is shown in the figure below:
All four UART resources now live inside the myled resource (note the indentation).
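For reference, the experiment module (removed below as source_dev.ko) might look roughly like this: a platform device whose single MEM resource spans the UART block, so the four existing s5pv210-uart resources become its children when the insert succeeds. The names and the exact 0xe2900000-0xe2900fff range are assumptions reconstructed from the listing, not code from the original post.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>

/* assumed range: wide enough to cover the four s5pv210-uart regions above */
static struct resource myled_res = {
    .name  = "myled",
    .start = 0xe2900000,
    .end   = 0xe2900fff,
    .flags = IORESOURCE_MEM,
};

static struct platform_device myled_dev = {
    .name          = "myled",
    .id            = -1,
    .resource      = &myled_res,
    .num_resources = 1,
};

static int __init source_dev_init(void)
{
    return platform_device_register(&myled_dev);
}

static void __exit source_dev_exit(void)
{
    /* unregistering releases the myled resource; note how its children vanish with it */
    platform_device_unregister(&myled_dev);
}

module_init(source_dev_init);
module_exit(source_dev_exit);
MODULE_LICENSE("GPL");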
Now rmmod the module and look again:
[root@linux]/# rmmod /drivers/source_dev.ko
[root@linux]/# cat /proc/iomem
30000000-42ffffff : System RAM
  30008000-3039ad13 : Kernel code
  303be000-3042e7bb : Kernel data
43800000-4fffffff : System RAM
88000000-88000000 : dm9000
  88000000-88000000 : dm9000
88000004-88000007 : dm9000
  88000004-88000007 : dm9000
e0900000-e0900fff : dma-pl330.0
  e0900000-e0900fff : dma-pl330.0
e0a00000-e0a00fff : dma-pl330.1
  e0a00000-e0a00fff : dma-pl330.1
e1100000-e11000ff : samsung-spdif
e1600000-e160001f : s5pv210-keypad
e1700000-e17000ff : s3c64xx-ts
  e1700000-e17000ff : samsung-adc-v3
e1800000-e1800fff : s3c2440-i2c.0
e1a00000-e1a00fff : s3c2440-i2c.2
e2200000-e22000ff : samsung-ac97
e2500000-e2500fff : samsung-pwm
e2700000-e27003ff : s3c2410-wdt
e2800000-e28000ff : s3c64xx-rtc
e8200000-e8203fff : s5pv210-pata.0
eb000000-eb000fff : s3c-sdhci.0
eb100000-eb100fff : s3c-sdhci.1
eb200000-eb200fff : s3c-sdhci.2
eb300000-eb300fff : s3c-sdhci.3
ec000000-ec01ffff : s3c-hsotg
eee30000-eee300ff : samsung-i2s.0
f1700000-f170ffff : s5p-mfc
f8000000-f8003fff : s5pv210-fb
fab00000-fab00fff : s3c2440-i2c.1
fb200000-fb200fff : s5pv210-fimc.0
fb300000-fb300fff : s5pv210-fimc.1
fb400000-fb400fff : s5pv210-fimc.2
fb600000-fb600fff : s5p-jpeg.0
[root@linux]/#
You can see that the e2900000 resources have disappeared entirely — another point worth questioning.
So a region that duplicates already-registered resources can still be inserted; the conflict is not rejected here but is handled elsewhere. Let's keep digging.
Next, let's analyze the caller of the function above, this time with annotations.
/*
* Insert a resource into the resource tree. If successful, return NULL,
* otherwise return the conflicting resource (compare to __request_resource())
*/
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
struct resource *first, *next;
/* note this is a loop: on a containment conflict it updates parent and retries, until the insert succeeds (NULL) or new is invalid relative to parent */
for (;; parent = first) {
first = __request_resource(parent, new); /* NULL means the resource was inserted */
if (!first)
return first; /* NULL: inserted, we are done */
if (first == parent) /* new exceeds the parent's range, or the range itself is invalid */
return first;
if (WARN_ON(first == new)) /* duplicated insertion: the same resource object inserted twice */
return first;
/* the conflicting node starts above new->start, or ends below new->end — see figure 1 below */
if ((first->start > new->start) || (first->end < new->end))
break;
/* new has exactly the same range as an existing resource */
if ((first->start == new->start) && (first->end == new->end))
break;
}
/* reaching here means we broke out of the loop: new overlaps something already in the tree */
for (next = first; ; next = next->sibling) {
/* Partial overlap? Bad, and unfixable — a partial overlap is treated as an error */
if (next->start < new->start || next->end > new->end)
return next;
if (!next->sibling) /* reached the end of this level: everything from first onward fits inside new */
break;
if (next->sibling->start > new->end) /* the next sibling starts above new->end, so the nodes from first up to next all lie inside new */
break;
}
new->parent = parent; /* new's parent is the node we settled on (root in our examples) */
new->sibling = next->sibling; /* new takes over the sibling link of the last contained node */
new->child = first; /* the resources covered by new become its children */
next->sibling = NULL;
for (next = first; next; next = next->sibling)
next->parent = new;
if (parent->child == first) {
parent->child = new;
} else {
next = parent->child;
while (next->sibling != first)
next = next->sibling;
next->sibling = new;
}
return NULL;
}
Note:
1. "duplicated insertion" above means inserting the very same resource object A twice, while ((first->start == new->start) && (first->end == new->end)) means another device is requesting a range that has already been claimed.
Figure 1 — note that the two conditions are ORed together.
Next, starting again from situation 2 above and working through __insert_resource, we analyze three more cases.
First, through the higher-level function, we once more try to insert C1 (0xa0000000-0xbfffffff).
1. Just as in the extra case above, __request_resource() fails, and the conflict it returns is A.
for (;; parent = first) {
first = __request_resource(parent, new);
if (!first)
return first;
if (first == parent)
return first;
if (WARN_ON(first == new)) /* duplicated insertion */
return first;
/* first->start = 0xa0000000 (A), new->start = 0xa0000000; first->end = 0xafffffff, new->end = 0xbfffffff */
if ((first->start > new->start) || (first->end < new->end))
break;
if ((first->start == new->start) && (first->end == new->end))
break;
}
2. The loop then breaks out because first->end (0xafffffff) < new->end (0xbfffffff); at this point parent = root and first = A.
3. In the code below, the partial-overlap check does not fire, because A lies entirely inside C1; A->sibling is B2 and B2->start (0xc0000000) > new->end (0xbfffffff), so we break out. The code after the loop then makes A a child of C1 and B2 its sibling — this insertion actually succeeds, with C1 becoming A's new parent.
for (next = first; ; next = next->sibling) {
/* Partial overlap? Bad, and unfixable */
/* next->end = 0xafffffff, new->end = 0xbfffffff: no partial overlap */
if (next->start < new->start || next->end > new->end)
return next;
if (!next->sibling)
break;
if (next->sibling->start > new->end)
break;
}
Next, again starting from situation 2 above, insert a region C2 (0xb0000000-0xbfffffff).
1. On the first pass through __request_resource, tmp->start (0xa0000000) > end (0xbfffffff) is false, then tmp->end (0xafffffff) < start (0xb0000000) is true, so we continue; now tmp is B2. Comparing tmp->start (0xc0000000) > end (0xbfffffff) shows the region can be inserted here, and NULL is returned. The figure below shows the result of the successful insertion.
Next, on top of that, insert a resource C2_ with exactly the same range as C2 (0xb0000000-0xbfffffff).
1. __request_resource is entered with root and C2_. p = &root->child, so tmp is A. tmp->start (0xa0000000) > end (0xbfffffff) is false, so the code below runs; this time tmp->end (0xafffffff) < start (0xb0000000) is true, so we continue back to the top of the loop and tmp becomes C2. Again C2's start is not above C2_'s end, so the second if is evaluated; but because C2 and C2_ cover the same range, tmp->end < start is false, and the function returns C2.
for (;;) {
tmp = *p;
if (!tmp || tmp->start > end) {
new->sibling = tmp;
*p = new;
new->parent = root;
return NULL;
}
p = &tmp->sibling;
if (tmp->end < start)
continue;
return tmp;
}
2. This happens to satisfy the last condition in the loop below (identical start and end), so we break out; at this point first is C2 and parent is root.
for (;; parent = first) {
first = __request_resource(parent, new);
if (!first)
return first;
if (first == parent)
return first;
if (WARN_ON(first == new)) /* duplicated insertion */
return first;
if ((first->start > new->start) || (first->end < new->end))
break;
if ((first->start == new->start) && (first->end == new->end))
break;
}
3. Now the second loop compares ranges. Because C2 and C2_ are equal, the first if (partial overlap) is not taken; C2's sibling B2 exists, so the last condition is evaluated, and B2->start (0xc0000000) is indeed above C2_'s end (0xbfffffff), so we break. At this point first is C2, next is C2, and parent is still root.
for (next = first; ; next = next->sibling) {
/* Partial overlap? Bad, and unfixable */
if (next->start < new->start || next->end > new->end)
return next;
if (!next->sibling)
break;
if (next->sibling->start > new->end)
break;
}
4. So first is C2, next is C2, and parent is root (in step 2 we left the first loop through the equal-range break, so we never went around it again).
new->parent = parent; /* C2_->parent = root */
new->sibling = next->sibling; /* C2_->sibling = C2->sibling = B2 */
new->child = first; /* C2_->child = C2 */
next->sibling = NULL; /* next->sibling = C2->sibling = NULL */
/* the for loop below then runs like this:
next = C2;
C2->parent = C2_;
next = C2->sibling = NULL (it was set to NULL three lines above);
loop exits
*/
for (next = first; next; next = next->sibling)
next->parent = new;
Let's look at what the data structure looks like at this point.
We can see that the new C2_ has become the parent of the old C2. Notice that a parent resource always covers at least its children's range, and that a newly added identical resource becomes the parent of the earlier one — which matches what we observed in the /proc/iomem experiment.
Now let's finish analyzing the remaining code.
4. Here parent->child is A while first is C2, so they are not equal. (Incidentally, if the resource we inserted had the same range as A, they would be equal, and the new A_ would have to become parent's child so that later traversals of the tree still reach everything.)
if (parent->child == first) {
parent->child = new;
} else {
next = parent->child; /* start scanning from A */
while (next->sibling != first) /* walk to the node just before first (C2) — found on the first step here */
next = next->sibling;
next->sibling = new; /* A->sibling = C2_ */
}
return NULL;
As you can see, this is a clean tree structure: starting from root, every node can be reached in order.
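Sketched textually (in place of the original figure), the final structure is:

iomem_resource (root)
  ├── A   (0xa0000000-0xafffffff)
  ├── C2_ (0xb0000000-0xbfffffff)
  │     └── C2 (0xb0000000-0xbfffffff)   /* the earlier, identical request */
  └── B2  (0xc0000000-0xcfffffff)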
Comparing this with our earlier analysis, a short summary:
1. The first time into __insert_resource, parent is root and __request_resource is called; root's child is the lowest-addressed resource of that level (A in our example).
2. new's range is then checked against the parent. At the root this almost always passes, since root's start and end cover everything, but at deeper levels the check can fail, in which case the node passed in (the parent) is returned.
3. The level under that parent (nodes sharing the same parent form one level, linked through sibling) is scanned from low to high. If new fits below some node of the level without overlapping anything, it is inserted right there; otherwise the scan stops and returns the node where it stopped (the conflicting node if there was an overlap).
4. Back in __insert_resource: if the insert succeeded (NULL) we are done. Otherwise the returned conflict is examined: if new sticks out of it (or matches it exactly) we break out of the loop; if new fits strictly inside it, we descend into that node (parent = first) and go back to step 2.
5. After the break, new is compared against the nodes of that level: a partial overlap with any node means the insert is impossible and that node is returned. If we reach the end of the level, or meet a node that starts above new's end, we stop scanning.
6. At this point new completely contains at least one existing resource. new then takes the place of those contained resources in the level: they are hung under new as its children, and their parent pointers are switched to new.
7. Finally, the node just before new in the level gets its sibling pointed at new (or parent->child is pointed at new if new is now the first node of the level).
Note that if we register an enclosing "duplicate" resource such as myled, the previously registered resources end up hanging under it as children.
And if we then release the myled resource, the resources hanging under it disappear from the tree along with it.
One resource with several others hanging under it (see the screenshot above); after unloading, they are all gone.
On top of this, suppose we register a resource that only partially overlaps existing ones — what happens?
The result, shown below, is that the registration fails.
Finally, let's analyze a few commonly used resource registration functions.
void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
void __iomem *devm_request_and_ioremap(struct device *dev,
struct resource *res);
void __iomem *devm_request_and_ioremap(struct device *dev,
struct resource *res)
{
void __iomem *dest_ptr;
dest_ptr = devm_ioremap_resource(dev, res);
if (IS_ERR(dest_ptr))
return NULL;
return dest_ptr;
}
/**
* devm_ioremap_resource() - check, request region, and ioremap resource
* @dev: generic device to handle the resource for
* @res: resource to be handled
*
* Checks that a resource is a valid memory region, requests the memory region
* and ioremaps it either as cacheable or as non-cacheable memory depending on
* the resource's flags. All operations are managed and will be undone on
* driver detach.
*
* Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
* on failure. Usage example:
*
* res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
* base = devm_ioremap_resource(&pdev->dev, res);
* if (IS_ERR(base))
* return PTR_ERR(base);
*/
void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
{
resource_size_t size;
const char *name;
void __iomem *dest_ptr;
BUG_ON(!dev);
/* on ARM we normally ioremap MEM resources; anything else is rejected */
if (!res || resource_type(res) != IORESOURCE_MEM) {
dev_err(dev, "invalid resource
");
return IOMEM_ERR_PTR(-EINVAL);
}
size = resource_size(res); /* size of the resource */
name = res->name ?: dev_name(dev); /* unnamed resources take the device name */
if (!devm_request_mem_region(dev, res->start, size, name)) { /* request (claim) the region */
dev_err(dev, "can't request region for resource %pR
", res);
return IOMEM_ERR_PTR(-EBUSY);
}
/* ioremap: map the physical range to kernel virtual addresses */
if (res->flags & IORESOURCE_CACHEABLE)
dest_ptr = devm_ioremap(dev, res->start, size);
else
dest_ptr = devm_ioremap_nocache(dev, res->start, size);
if (!dest_ptr) {
dev_err(dev, "ioremap failed for resource %pR
", res);
devm_release_mem_region(dev, res->start, size);
dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
}
return dest_ptr;
}
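In a driver this is typically called from probe(); a brief sketch (the driver name is hypothetical):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int xxxx_probe(struct platform_device *pdev)
{
    struct resource *res;
    void __iomem *base;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0); /* fetch the MEM resource */
    base = devm_ioremap_resource(&pdev->dev, res);        /* check, request and ioremap it */
    if (IS_ERR(base))
        return PTR_ERR(base);

    writel(0x1, base); /* registers are usable; the mapping is released automatically on detach */
    return 0;
}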
/* size of an I/O resource */
static inline resource_size_t resource_size(const struct resource *res)
{
return res->end - res->start + 1;
}
#define devm_request_mem_region(dev,start,n,name) \
__devm_request_region(dev, &iomem_resource, (start), (n), (name))
/*
* Managed region resource
*/
struct region_devres {
struct resource *parent;
resource_size_t start;
resource_size_t n;
};
struct resource * __devm_request_region(struct device *dev,
struct resource *parent, resource_size_t start,
resource_size_t n, const char *name)
{
struct region_devres *dr = NULL;
struct resource *res;
/* allocate a devres entry to track the region */
dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
GFP_KERNEL);
if (!dr)
return NULL;
/* record what we are about to request */
dr->parent = parent;
dr->start = start;
dr->n = n;
/* actually request the region */
res = __request_region(parent, start, n, name, 0);
if (res)
devres_add(dev, dr); /* attach the tracking entry to the device */
else
devres_free(dr);
return res;
}
static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);
/**
* __request_region - create a new busy resource region
* @parent: parent resource descriptor
* @start: resource start address
* @n: resource region size
* @name: reserving caller's ID string
* @flags: IO resource flags
*/
struct resource * __request_region(struct resource *parent,
resource_size_t start, resource_size_t n,
const char *name, int flags)
{
DECLARE_WAITQUEUE(wait, current);
struct resource *res = alloc_resource(GFP_KERNEL);
if (!res)
return NULL;
/* fill in the new resource */
res->name = name;
res->start = start;
res->end = start + n - 1;
res->flags = resource_type(parent);
res->flags |= IORESOURCE_BUSY | flags; /* note the busy flag is set by default */
write_lock(&resource_lock);
for (;;) {
struct resource *conflict;
/* try to insert the resource (as analyzed above, NULL means success) */
conflict = __request_resource(parent, res);
if (!conflict)
break;
if (conflict != parent) { /* the conflict is an existing region inside the parent; if it is not busy, descend into it and retry */
if (!(conflict->flags & IORESOURCE_BUSY)) {
parent = conflict;
continue;
}
}
/* if the region is software-muxed between several users, join the wait queue and sleep until it is released */
if (conflict->flags & flags & IORESOURCE_MUXED) {
add_wait_queue(&muxed_resource_wait, &wait);
write_unlock(&resource_lock);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule(); /* give up the CPU until we are woken */
remove_wait_queue(&muxed_resource_wait, &wait); /* woken up: the region may be free now, leave the queue and retry */
write_lock(&resource_lock);
continue;
}
/* Uhhuh, that didn't work out.. */
free_resource(res);
res = NULL; /* returning NULL means the request failed */
break;
}
write_unlock(&resource_lock);
return res; /* return the newly created busy region (or NULL) */
}
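A side note on the IORESOURCE_MUXED branch above: the request_muxed_region() helper passes that flag into __request_region(), so a second claimant sleeps on muxed_resource_wait instead of failing, and is woken when the current owner calls release_region(). A hedged sketch (the port address and name are made up; Super-I/O index/data ports are the classic user of this):

#include <linux/ioport.h>
#include <linux/errno.h>

static int demo_touch_superio(void)
{
    /* may sleep until the muxed region is free */
    if (!request_muxed_region(0x2e, 2, "demo-superio"))
        return -EBUSY;

    /* ... access the shared index/data port pair ... */

    release_region(0x2e, 2); /* wakes up any other waiter on muxed_resource_wait */
    return 0;
}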
The region's tracking entry is linked into the device's devres list, so when the device is unregistered the region, and the memory used to track it, are released automatically.
void devres_add(struct device *dev, void *res)
{
struct devres *dr = container_of(res, struct devres, data);
unsigned long flags;
spin_lock_irqsave(&dev->devres_lock, flags);
add_dr(dev, &dr->node);
spin_unlock_irqrestore(&dev->devres_lock, flags);
}
static void add_dr(struct device *dev, struct devres_node *node)
{
devres_log(dev, node, "ADD");
BUG_ON(!list_empty(&node->entry));
list_add_tail(&node->entry, &dev->devres_head);
}
Note again that a release function was already bound when the region was requested; when the device is later torn down, the resource is released through that callback.
struct resource * __devm_request_region(struct device *dev,
struct resource *parent, resource_size_t start,
resource_size_t n, const char *name)
{
struct region_devres *dr = NULL;
struct resource *res;
dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
GFP_KERNEL);
if (!dr)
return NULL;
dr->parent = parent;
dr->start = start;
dr->n = n;
res = __request_region(parent, start, n, name, 0);
if (res)
devres_add(dev, dr);
else
devres_free(dr);
return res;
}
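For completeness, the release callback bound by devres_alloc() above looks roughly like this in kernel/resource.c of the same era — it simply hands the recorded range back via __release_region():

static void devm_region_release(struct device *dev, void *res)
{
    struct region_devres *this = res;

    __release_region(this->parent, this->start, this->n);
}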
————————————————
Copyright notice: this article is an original work by CSDN blogger "to_run_away", published under the CC 4.0 BY-SA license. Please keep the original link and this notice when reposting.
Original link: https://blog.csdn.net/qq_16777851/article/details/82975057