Projects STRLCPY criu Commits 8d2ece72
🤬
  • mount: don't collect mounts when they are not required

    Currently we collect mounts to clean up a mount namespace,
    but it isn't required when we are going to call pivot_root.
    
    https://github.com/docker/docker/issues/31663
    
    Signed-off-by: Andrei Vagin <[email protected]>
  • Loading...
  • Andrei Vagin committed 7 years ago
    8d2ece72
    1 parent 40738f7b
  • ■ ■ ■ ■ ■ ■
    criu/mount.c
    skipped 2900 lines
    2901 2901  int prepare_mnt_ns(void)
    2902 2902  {
    2903 2903   int ret = -1, rst = -1;
    2904  - struct mount_info *old;
    2905 2904   struct ns_id ns = { .type = NS_CRIU, .ns_pid = PROC_SELF, .nd = &mnt_ns_desc };
    2906 2905   struct ns_id *nsid;
    2907 2906   
    skipped 2 lines
    2910 2909   
    2911 2910   pr_info("Restoring mount namespace\n");
    2912 2911   
    2913  - old = collect_mntinfo(&ns, false);
    2914  - if (old == NULL)
    2915  - return -1;
     2912 + if (!opts.root) {
     2913 + struct mount_info *old;
    2916 2914   
    2917  - if (!opts.root) {
    2918 2915   if (chdir("/")) {
    2919 2916   pr_perror("chdir(\"/\") failed");
    2920 2917   return -1;
    2921 2918   }
    2922 2919   
     2920 + old = collect_mntinfo(&ns, false);
     2921 + if (old == NULL)
     2922 + return -1;
    2923 2923   /*
    2924 2924   * The new mount namespace is filled with the mountpoint
    2925 2925   * clones from the original one. We have to umount them
    2926 2926   * prior to recreating new ones.
    2927 2927   */
    2928 2928   pr_info("Cleaning mount namespace\n");
    2929  - if (mnt_tree_for_each_reverse(ns.mnt.mntinfo_tree, do_umount_one))
     2929 + if (mnt_tree_for_each_reverse(ns.mnt.mntinfo_tree, do_umount_one)) {
     2930 + free_mntinfo(old);
    2930 2931   return -1;
    2931  - }
     2932 + }
    2932 2933   
    2933  - free_mntinfo(old);
     2934 + free_mntinfo(old);
     2935 + }
    2934 2936   
    2935 2937   ret = populate_mnt_ns();
    2936 2938   if (ret)
    skipped 340 lines