@@ -396,6 +396,7 @@ struct kernfs_node *kernfs_new_node(struct kernfs_root *root, const char *name,
 
 	atomic_set(&kn->count, 1);
 	atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
+	kn->deact_depth = 1;
 	RB_CLEAR_NODE(&kn->rb);
 
 	kn->name = name;
@@ -461,6 +462,7 @@ int kernfs_add_one(struct kernfs_node *kn, struct kernfs_node *parent)
 
 	/* Mark the entry added into directory tree */
 	atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
+	kn->deact_depth--;
 	ret = 0;
 out_unlock:
 	mutex_unlock(&kernfs_mutex);
@@ -561,6 +563,7 @@ struct kernfs_root *kernfs_create_root(struct kernfs_dir_ops *kdops, void *priv)
 	}
 
 	atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
+	kn->deact_depth--;
 	kn->priv = priv;
 	kn->dir.root = root;
 
@@ -773,7 +776,8 @@ static void __kernfs_deactivate(struct kernfs_node *kn)
 	/* prevent any new usage under @kn by deactivating all nodes */
 	pos = NULL;
 	while ((pos = kernfs_next_descendant_post(pos, kn))) {
-		if (atomic_read(&pos->active) >= 0) {
+		if (!pos->deact_depth++) {
+			WARN_ON_ONCE(atomic_read(&pos->active) < 0);
 			atomic_add(KN_DEACTIVATED_BIAS, &pos->active);
 			pos->flags |= KERNFS_JUST_DEACTIVATED;
 		}
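The deact_depth counter is what makes deactivation nest: only the 0 -> 1 transition applies KN_DEACTIVATED_BIAS, and only the 1 -> 0 transition (in __kernfs_reactivate(), added below) removes it, so paired deactivate/reactivate calls balance at any nesting level. A minimal standalone sketch of just this counting scheme; plain C with no kernfs types, where node, deactivate() and reactivate() are illustrative stand-ins, not kernel API:

#include <assert.h>
#include <stdio.h>

#define DEACTIVATED_BIAS	(-100000)	/* stand-in for KN_DEACTIVATED_BIAS */

struct node {
	int active;		/* stand-in for the atomic active count */
	int deact_depth;	/* how many times this node was deactivated */
};

/* Apply the bias only on the first (outermost) deactivation. */
static void deactivate(struct node *n)
{
	if (!n->deact_depth++)
		n->active += DEACTIVATED_BIAS;
}

/* Remove the bias only when the outermost reactivation unwinds. */
static void reactivate(struct node *n)
{
	if (!--n->deact_depth)
		n->active -= DEACTIVATED_BIAS;
	assert(n->deact_depth >= 0);
}

int main(void)
{
	struct node n = { .active = 0, .deact_depth = 0 };

	deactivate(&n);		/* outer caller: bias applied */
	deactivate(&n);		/* nested caller: depth bumped, bias untouched */
	reactivate(&n);		/* inner unwind: node stays deactivated */
	printf("inner unwind: active=%d depth=%d\n", n.active, n.deact_depth);
	reactivate(&n);		/* outer unwind: bias removed, node live again */
	printf("outer unwind: active=%d depth=%d\n", n.active, n.deact_depth);
	return 0;
}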
@@ -797,6 +801,118 @@ static void __kernfs_deactivate(struct kernfs_node *kn)
 	}
 }
 
+static void __kernfs_reactivate(struct kernfs_node *kn)
+{
+	struct kernfs_node *pos;
+
+	lockdep_assert_held(&kernfs_mutex);
+
+	pos = NULL;
+	while ((pos = kernfs_next_descendant_post(pos, kn))) {
+		if (!--pos->deact_depth) {
+			WARN_ON_ONCE(atomic_read(&pos->active) >= 0);
+			atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
+		}
+		WARN_ON_ONCE(pos->deact_depth < 0);
+	}
+
+	/* some nodes reactivated, kick get_active waiters */
+	wake_up_all(&kernfs_root(kn)->deactivate_waitq);
+}
+
+static void __kernfs_deactivate_self(struct kernfs_node *kn)
+{
+	/*
+	 * Take ourself out of the active ref dependency chain and
+	 * deactivate.  If we're called without an active ref, lockdep
+	 * will complain.
+	 */
+	kernfs_put_active(kn);
+	__kernfs_deactivate(kn);
+}
+
+static void __kernfs_reactivate_self(struct kernfs_node *kn)
+{
+	__kernfs_reactivate(kn);
+	/*
+	 * Restore the active ref dropped by deactivate_self() so that it's
+	 * balanced on return.  put_active() will soon be called on @kn, so
+	 * this can't break anything regardless of @kn's state.
+	 */
+	atomic_inc(&kn->active);
+	if (kernfs_lockdep(kn))
+		rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
+}
+
+/**
+ * kernfs_deactivate - deactivate subtree of a node
+ * @kn: kernfs_node to deactivate subtree of
+ *
+ * Deactivate the subtree of @kn.  On return, there's no active operation
+ * going on under @kn, and creation or renaming of a node under @kn is
+ * blocked until @kn is reactivated or removed.  This function can be
+ * called multiple times and nests properly.  Each invocation should be
+ * paired with kernfs_reactivate().
+ *
+ * For a kernfs user which uses simple locking, the subsystem lock would
+ * nest inside the active reference.  This becomes problematic if the
+ * user tries to remove nodes while holding the subsystem lock, as it
+ * would create a reverse locking dependency from the subsystem lock to
+ * the active ref.  This function can be used to break such a reverse
+ * dependency.  The user can call this function outside the subsystem
+ * lock and then proceed to invoke kernfs_remove() while holding the
+ * subsystem lock without introducing such a dependency.
+ */
+void kernfs_deactivate(struct kernfs_node *kn)
+{
+	mutex_lock(&kernfs_mutex);
+	__kernfs_deactivate(kn);
+	mutex_unlock(&kernfs_mutex);
+}
+
+/**
+ * kernfs_reactivate - reactivate subtree of a node
+ * @kn: kernfs_node to reactivate subtree of
+ *
+ * Undo kernfs_deactivate().
+ */
+void kernfs_reactivate(struct kernfs_node *kn)
+{
+	mutex_lock(&kernfs_mutex);
+	__kernfs_reactivate(kn);
+	mutex_unlock(&kernfs_mutex);
+}
+
+/**
+ * kernfs_deactivate_self - deactivate subtree of a node from its own method
+ * @kn: the self kernfs_node to deactivate subtree of
+ *
+ * The caller must be running off of a kernfs operation which is invoked
+ * with an active reference - e.g. one of kernfs_ops.  Once this function
+ * is called, @kn may be removed by someone else while the enclosing
+ * method is in progress.  Other than that, this function is equivalent
+ * to kernfs_deactivate() and should be paired with kernfs_reactivate_self().
+ */
+void kernfs_deactivate_self(struct kernfs_node *kn)
+{
+	mutex_lock(&kernfs_mutex);
+	__kernfs_deactivate_self(kn);
+	mutex_unlock(&kernfs_mutex);
+}
+
+/**
+ * kernfs_reactivate_self - reactivate subtree of a node from its own method
+ * @kn: the self kernfs_node to reactivate subtree of
+ *
+ * Undo kernfs_deactivate_self().
+ */
+void kernfs_reactivate_self(struct kernfs_node *kn)
+{
+	mutex_lock(&kernfs_mutex);
+	__kernfs_reactivate_self(kn);
+	mutex_unlock(&kernfs_mutex);
+}
+
 static void __kernfs_remove(struct kernfs_node *kn)
 {
 	struct kernfs_root *root = kernfs_root(kn);
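As the kernel-doc for kernfs_deactivate() above explains, a user whose subsystem lock nests inside active references can deactivate first and then remove under its own lock. A hedged sketch of that calling pattern; my_subsys_mutex, my_remove_node() and the teardown step are hypothetical caller code, not part of this patch:

static DEFINE_MUTEX(my_subsys_mutex);	/* hypothetical subsystem lock */

static void my_remove_node(struct kernfs_node *kn)
{
	/*
	 * Deactivate outside the subsystem lock: in-flight active refs
	 * drain here, so none of them can later wait on my_subsys_mutex.
	 */
	kernfs_deactivate(kn);

	mutex_lock(&my_subsys_mutex);
	/* ... tear down the subsystem state backing @kn ... */
	kernfs_remove(kn);	/* safe: no reverse lock dependency now */
	mutex_unlock(&my_subsys_mutex);
}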
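The _self variants exist because a kernfs_ops method already holds an active reference, so plain kernfs_deactivate() from inside one would wait on itself; __kernfs_deactivate_self() drops that ref first. A sketch of how a handler might use the pair; my_write() and my_do_reconfig() are hypothetical, and the kernfs_ops write signature is assumed from kernfs of this era:

static ssize_t my_write(struct kernfs_open_file *of, char *buf,
			size_t bytes, loff_t off)
{
	struct kernfs_node *kn = of->kn;

	/* Drop our own active ref and deactivate the subtree. */
	kernfs_deactivate_self(kn);

	/*
	 * @kn may now be removed by someone else; whatever runs here
	 * must tolerate that.
	 */
	my_do_reconfig();

	/* Reactivate and restore the active ref before returning. */
	kernfs_reactivate_self(kn);
	return bytes;
}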