/*
 * kref.h - library routines for handling generic reference counted objects
 *
 * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
 * Copyright (C) 2004 IBM Corp.
 *
 * based on kobject.h which was:
 * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
 * Copyright (C) 2002-2003 Open Source Development Labs
 *
 * This file is released under the GPLv2.
 *
 */

#ifndef _KREF_H_
#define _KREF_H_

#include <linux/bug.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct kref {
	atomic_t refcount;
};

/**
 * kref_init - initialize object.
 * @kref: object in question.
 */
static inline void kref_init(struct kref *kref)
{
	atomic_set(&kref->refcount, 1);
}
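
/*
 * Example usage (a sketch only: "struct my_data" and my_data_alloc() are
 * hypothetical and not part of this header).  kref_init() sets the count
 * to 1, representing the caller's initial reference:
 *
 *	struct my_data {
 *		int value;
 *		struct kref refcount;
 *	};
 *
 *	static struct my_data *my_data_alloc(void)
 *	{
 *		struct my_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *		if (data)
 *			kref_init(&data->refcount);
 *		return data;
 *	}
 */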

/**
 * kref_get - increment refcount for object.
 * @kref: object.
 */
static inline void kref_get(struct kref *kref)
{
	/* If the refcount was 0 before incrementing, we have a race
	 * condition: this kref is being freed by some other thread right
	 * now.  In that case one should use kref_get_unless_zero().
	 */
	WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
}
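
/*
 * Example usage: take an additional reference before handing the object
 * to another execution context, so the object cannot be freed while that
 * context still uses it (a sketch; start_worker() is a hypothetical
 * stand-in for whatever passes the pointer on):
 *
 *	kref_get(&data->refcount);
 *	start_worker(data);
 *
 * The receiving context then owns that reference and must drop it with
 * kref_put() when it is done with the object.
 */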

/**
 * kref_sub - subtract a number of refcounts for object.
 * @kref: object.
 * @count: number of refcounts to subtract.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function.  If the caller does pass kfree to this
 *	     function, you will be publicly mocked mercilessly by the kref
 *	     maintainer, and anyone else who happens to notice it.  You have
 *	     been warned.
 *
 * Subtract @count from the refcount, and if it drops to 0, call release().
 * Return 1 if the object was removed, otherwise return 0.  Beware: if this
 * function returns 0, you still cannot count on the kref remaining in
 * memory, since another thread may drop the last reference at any time.
 * Only use the return value to learn that the kref is now gone, never that
 * it is still present.
 */
static inline int kref_sub(struct kref *kref, unsigned int count,
	     void (*release)(struct kref *kref))
{
	WARN_ON(release == NULL);

	if (atomic_sub_and_test((int) count, &kref->refcount)) {
		release(kref);
		return 1;
	}
	return 0;
}
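
/*
 * Example usage: drop several references in a single atomic operation,
 * e.g. after cancelling a batch of queued work items that each held a
 * reference (a sketch; cancel_pending_work() is hypothetical, and
 * my_data_release() is the hypothetical release function shown with
 * kref_put() below):
 *
 *	unsigned int cancelled = cancel_pending_work(data);
 *
 *	if (cancelled)
 *		kref_sub(&data->refcount, cancelled, my_data_release);
 */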

/**
 * kref_put - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function.  If the caller does pass kfree to this
 *	     function, you will be publicly mocked mercilessly by the kref
 *	     maintainer, and anyone else who happens to notice it.  You have
 *	     been warned.
 *
 * Decrement the refcount, and if it drops to 0, call release().
 * Return 1 if the object was removed, otherwise return 0.  Beware: if this
 * function returns 0, you still cannot count on the kref remaining in
 * memory, since another thread may drop the last reference at any time.
 * Only use the return value to learn that the kref is now gone, never that
 * it is still present.
 */
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
	return kref_sub(kref, 1, release);
}
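
/*
 * Example usage: a typical release function recovers the enclosing object
 * with container_of() and frees it (a sketch, reusing the hypothetical
 * "struct my_data" from above):
 *
 *	static void my_data_release(struct kref *kref)
 *	{
 *		struct my_data *data =
 *			container_of(kref, struct my_data, refcount);
 *
 *		kfree(data);
 *	}
 *
 *	kref_put(&data->refcount, my_data_release);
 *
 * Passing kfree() directly would only happen to work if the kref were the
 * first member of the structure, which is why a real release function is
 * required.
 */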

/**
 * kref_put_spinlock_irqsave - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required, and it is not acceptable to pass kfree
 *	     in as this function.
 * @lock: lock to take in the release case.
 *
 * Behaves identically to kref_put() with one exception: if the reference
 * count drops to zero, @lock will be taken atomically with respect to
 * dropping the reference count.  The release function has to call
 * spin_unlock() without _irqrestore; interrupts are restored by this
 * function after release() returns.
 */
static inline int kref_put_spinlock_irqsave(struct kref *kref,
		void (*release)(struct kref *kref),
		spinlock_t *lock)
{
	unsigned long flags;

	WARN_ON(release == NULL);
	if (atomic_add_unless(&kref->refcount, -1, 1))
		return 0;
	spin_lock_irqsave(lock, flags);
	if (atomic_dec_and_test(&kref->refcount)) {
		release(kref);
		local_irq_restore(flags);
		return 1;
	}
	spin_unlock_irqrestore(lock, flags);
	return 0;
}
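
/*
 * Example usage: the object sits in a lookup list protected by an
 * irq-safe spinlock, and the release function unlinks it.  Note the bare
 * spin_unlock(): interrupts are restored by kref_put_spinlock_irqsave()
 * itself after release() returns.  A sketch; "lookup_lock" and the "node"
 * member are hypothetical:
 *
 *	static void my_data_release_locked(struct kref *kref)
 *	{
 *		struct my_data *data =
 *			container_of(kref, struct my_data, refcount);
 *
 *		list_del(&data->node);
 *		spin_unlock(&lookup_lock);
 *		kfree(data);
 *	}
 *
 *	kref_put_spinlock_irqsave(&data->refcount, my_data_release_locked,
 *				  &lookup_lock);
 */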

/**
 * kref_put_mutex - decrement refcount for object, taking a mutex on final put.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required.
 * @lock: mutex to take in the release case.
 *
 * Behaves like kref_put() except that if the reference count drops to zero,
 * @lock is taken atomically with respect to dropping the reference count.
 * The release function is called with @lock held and is responsible for
 * unlocking it.
 *
 * Return 1 if the object was removed, otherwise return 0.
 */
static inline int kref_put_mutex(struct kref *kref,
				 void (*release)(struct kref *kref),
				 struct mutex *lock)
{
	WARN_ON(release == NULL);
	if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
		mutex_lock(lock);
		if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
			mutex_unlock(lock);
			return 0;
		}
		release(kref);
		return 1;
	}
	return 0;
}
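
/*
 * Example usage: like the spinlock variant above, but with a mutex
 * protecting the lookup structure.  The release function runs with the
 * mutex held and must unlock it itself (a sketch; "lookup_mutex" is
 * hypothetical):
 *
 *	static void my_data_release_mutex(struct kref *kref)
 *	{
 *		struct my_data *data =
 *			container_of(kref, struct my_data, refcount);
 *
 *		list_del(&data->node);
 *		mutex_unlock(&lookup_mutex);
 *		kfree(data);
 *	}
 *
 *	kref_put_mutex(&data->refcount, my_data_release_mutex,
 *		       &lookup_mutex);
 */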

/**
 * kref_get_unless_zero - Increment refcount for object unless it is zero.
 * @kref: object.
 *
 * Return non-zero if the increment succeeded.  Otherwise return 0.
 *
 * This function is intended to simplify locking around refcounting for
 * objects that can be looked up from a lookup structure, and which are
 * removed from that lookup structure in the object destructor.
 * Operations on such objects require at least a read lock around
 * lookup + kref_get, and a write lock around kref_put + removal from the
 * lookup structure.  Furthermore, RCU implementations become extremely
 * tricky.  With a lookup followed by a kref_get_unless_zero *with return
 * value check*, locking in the kref_put path can be deferred to the actual
 * removal from the lookup structure, and RCU lookups become trivial.
 */
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
	return atomic_add_unless(&kref->refcount, 1, 0);
}
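
/*
 * Example usage: look an object up under the lock that protects the
 * lookup structure, and only hand it out if a reference could still be
 * taken (a sketch; "lookup_lock", "lookup_list" and the "id"/"node"
 * members are hypothetical):
 *
 *	static struct my_data *my_data_find(int id)
 *	{
 *		struct my_data *data, *found = NULL;
 *
 *		spin_lock(&lookup_lock);
 *		list_for_each_entry(data, &lookup_list, node) {
 *			if (data->id == id &&
 *			    kref_get_unless_zero(&data->refcount)) {
 *				found = data;
 *				break;
 *			}
 *		}
 *		spin_unlock(&lookup_lock);
 *		return found;
 *	}
 *
 * Because the return value is checked, an object whose last reference is
 * concurrently being dropped is never handed out with a zero refcount.
 */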

#endif /* _KREF_H_ */