diff --git a/Makefile.am b/Makefile.am
index bc61f62..1755f15 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1505,4 +1505,5 @@ EXTRA_DIST = packages/rpm.sh packages/rpm/rpm.spec packages/deb.sh packages/deb
              src/windows/config.h src/windows/gperftools/tcmalloc.h \
              docs/pprof.see_also src/windows/TODO \
              $(WINDOWS_PROJECTS) \
-             src/solaris/libstdc++.la
+             src/solaris/libstdc++.la \
+             CMakeLists.txt cmake
diff --git a/README b/README
index 2e604dd..714a524 100644
--- a/README
+++ b/README
@@ -20,7 +20,7 @@ https://groups.google.com/forum/#!forum/gperftools
 
 gperftools was original home for pprof program. But do note that
 original pprof (which is still included with gperftools) is now
-deprecated in favor of golang version at https://github.com/google/pprof
+deprecated in favor of the Go version at https://github.com/google/pprof
 
 
 TCMALLOC
diff --git a/cmake/config.h.in b/cmake/config.h.in
index 22338da..030955a 100644
--- a/cmake/config.h.in
+++ b/cmake/config.h.in
@@ -96,7 +96,7 @@
 #cmakedefine HAVE_INTTYPES_H
 
 /* Define to 1 if you have the <libunwind.h> header file. */
-#cmakedefine01 HAVE_LIBUNWIND_H
+#cmakedefine HAVE_LIBUNWIND_H
 
 /* Define to 1 if you have the <linux/ptrace.h> header file. */
 #cmakedefine HAVE_LINUX_PTRACE_H
diff --git a/debian/changelog b/debian/changelog
index 2185778..054bffb 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+google-perftools (2.9.1+git20220208.1.fe85bbd-1) UNRELEASED; urgency=low
+
+  * New upstream snapshot.
+
+ -- Debian Janitor <janitor@jelmer.uk>  Fri, 15 Apr 2022 05:32:53 -0000
+
 google-perftools (2.9.1-1) unstable; urgency=medium
 
   * New upstream release.
diff --git a/debian/patches/20121013-pprof.1.patch b/debian/patches/20121013-pprof.1.patch
index fcd9011..1b9fe52 100644
--- a/debian/patches/20121013-pprof.1.patch
+++ b/debian/patches/20121013-pprof.1.patch
@@ -1,6 +1,8 @@
---- google-perftools.orig/docs/pprof.1   2012-02-04 04:18:22.000000000 +0900
-+++ google-perftools.orig/docs/pprof.1	2012-10-13 20:14:32.000000000 +0900
-@@ -125,7 +125,7 @@
+Index: google-perftools/docs/pprof.1
+===================================================================
+--- google-perftools.orig/docs/pprof.1
++++ google-perftools/docs/pprof.1
+@@ -125,7 +125,7 @@ is maintained as a web page called
  .B cpu_profiler.html
  and is likely installed at one of the following locations:
  .IP
diff --git a/debian/patches/perl_path.patch b/debian/patches/perl_path.patch
index 2ebfb24..9c6ba82 100644
--- a/debian/patches/perl_path.patch
+++ b/debian/patches/perl_path.patch
@@ -5,8 +5,10 @@ Last-Update: 2020-03-26
 
 ---
 
---- google-perftools-2.7.90.orig/src/pprof
-+++ google-perftools-2.7.90/src/pprof
+Index: google-perftools/src/pprof
+===================================================================
+--- google-perftools.orig/src/pprof
++++ google-perftools/src/pprof
 @@ -1,4 +1,4 @@
 -#! /usr/bin/env perl
 +#! /usr/bin/perl
diff --git a/m4/pc_from_ucontext.m4 b/m4/pc_from_ucontext.m4
index 159b01d..ffe0764 100644
--- a/m4/pc_from_ucontext.m4
+++ b/m4/pc_from_ucontext.m4
@@ -26,6 +26,7 @@ AC_DEFUN([AC_PC_FROM_UCONTEXT],
    pc_fields="$pc_fields uc_mcontext.gregs[[REG_EIP]]" # Linux (i386)
    pc_fields="$pc_fields uc_mcontext.gregs[[REG_RIP]]" # Linux (x86_64)
    pc_fields="$pc_fields uc_mcontext.sc_ip"            # Linux (ia64)
+   pc_fields="$pc_fields uc_mcontext.__pc"             # Linux (loongarch64)
    pc_fields="$pc_fields uc_mcontext.pc"               # Linux (mips)
    pc_fields="$pc_fields uc_mcontext.uc_regs->gregs[[PT_NIP]]" # Linux (ppc)
    pc_fields="$pc_fields uc_mcontext.__gregs[[REG_PC]]"  # Linux (riscv64)
diff --git a/src/base/basictypes.h b/src/base/basictypes.h
index ea87a6d..5814d5b 100644
--- a/src/base/basictypes.h
+++ b/src/base/basictypes.h
@@ -387,6 +387,8 @@ class AssignAttributeStartEnd {
 #   define CACHELINE_ALIGNED __attribute__((aligned(64)))
 # elif (defined(__e2k__))
 #   define CACHELINE_ALIGNED __attribute__((aligned(64)))
+# elif defined(__loongarch64)
+#   define CACHELINE_ALIGNED __attribute__((aligned(64)))
 # else
 #   error Could not determine cache line length - unknown architecture
 # endif
diff --git a/src/base/linux_syscall_support.h b/src/base/linux_syscall_support.h
index d6899b8..b807b11 100644
--- a/src/base/linux_syscall_support.h
+++ b/src/base/linux_syscall_support.h
@@ -131,12 +131,12 @@
 #define SYS_LINUX_SYSCALL_SUPPORT_H
 
 /* We currently only support x86-32, x86-64, ARM, MIPS, PPC/PPC64, Aarch64,
- * s390, s390x, and riscv64 on Linux.
+ * s390, s390x, riscv64, and loongarch64 on Linux.
  * Porting to other related platforms should not be difficult.
  */
 #if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \
      defined(__mips__) || defined(__mips64) || defined(__mips64el__) || defined(__PPC__) || \
-     defined(__aarch64__) || defined(__s390__) || defined(__riscv)) \
+     defined(__aarch64__) || defined(__s390__) || defined(__riscv) || defined(__loongarch64)) \
   && (defined(__linux))
 
 #ifndef SYS_CPLUSPLUS
@@ -541,6 +541,31 @@ struct kernel_stat {
   unsigned long      __unused4;
   unsigned long      __unused5;
 };
+
+/* From linux/include/uapi/asm-generic/stat.h */
+#elif defined(__loongarch64)
+struct kernel_stat {
+  unsigned long      st_dev;
+  unsigned long      st_ino;
+  unsigned int       st_mode;
+  unsigned int       st_nlink;
+  unsigned int       st_uid;
+  unsigned int       st_gid;
+  unsigned long      st_rdev;
+  unsigned long      __pad1;
+  long               st_size;
+  int                st_blksize;
+  int                __pad2;
+  long               st_blocks;
+  long               st_atime_;
+  unsigned long      st_atime_nsec_;
+  long               st_mtime_;
+  unsigned long      st_mtime_nsec_;
+  long               st_ctime_;
+  unsigned long      st_ctime_nsec_;
+  unsigned int       __unused4;
+  unsigned int       __unused5;
+};
 #endif
 
 
@@ -954,8 +979,24 @@ struct kernel_stat {
 # ifndef __NR_fstatat
 # define __NR_fstatat            79
 # endif
-#endif
 
+#elif defined(__loongarch64)
+#ifndef __NR_gettid
+#define __NR_gettid             178
+#endif
+#ifndef __NR_futex
+#define __NR_futex              98
+#endif
+#ifndef __NR_openat
+#define __NR_openat             56
+#endif
+#ifndef __NR_fstatat
+#define __NR_fstatat            79
+#endif
+#ifndef __NR_getdents64
+#define __NR_getdents64         61
+#endif  /* End of loongarch64 definitions */
+#endif
 
 /* After forking, we must make sure to only call system calls.               */
 #if __BOUNDED_POINTERS__
@@ -1017,7 +1058,8 @@ struct kernel_stat {
 
   #undef  LSS_RETURN
   #if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) ||        \
-       defined(__aarch64__) || defined(__s390__) || defined(__riscv))
+       defined(__aarch64__) || defined(__s390__) || defined(__riscv)) ||      \
+       defined(__loongarch64)
   /* Failing system calls return a negative result in the range of
    * -1..-4095. These are "errno" values with the sign inverted.
    */
@@ -2564,6 +2606,159 @@ struct kernel_stat {
         LSS_BODY(type, name, "r"(__a1), "r"(__a2), "r"(__a3), "r"(__a4),      \
                              "r"(__a5));                                      \
       }
+
+  #elif defined(__loongarch64)
+  #undef LSS_REG
+  #define LSS_REG(r,a) register long __a##r __asm__("$a"#r) = (long)(a)
+  #define LOONGARCH__SYSCALL_CLOBBERS                                         \
+          "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",      \
+          "memory"
+  #undef LSS_BODY
+  #define LSS_BODY(type,name,args...)                                         \
+        register long __a7 __asm__("$a7") = __NR_##name;                      \
+        long __res;                                                           \
+        __asm__ __volatile__ (                                                \
+                                "syscall        0\n\t"                        \
+                                : "+r" (__a0)                                 \
+                                : "r" (__a7), ##args                          \
+                                : LOONGARCH__SYSCALL_CLOBBERS);               \
+        __res = __a0;                                                         \
+        LSS_RETURN(type, __res)
+  #undef _syscall0
+  #define _syscall0(type,name)                                                \
+    type LSS_NAME(name)(void) {                                               \
+        register long __a7 __asm__("$a7") = __NR_##name;                      \
+        register long __a0 __asm__("$a0");                                    \
+        long __res;                                                           \
+        __asm__ __volatile__ (                                                \
+                                "syscall        0\n\t"                        \
+                                : "=r" (__a0)                                 \
+                                : "r" (__a7)                                  \
+                                : LOONGARCH__SYSCALL_CLOBBERS);               \
+        __res = __a0;                                                         \
+        LSS_RETURN(type, __res);                                              \
+    }
+  #undef _syscall1
+  #define _syscall1(type,name,type1,arg1)                                     \
+    type LSS_NAME(name)(type1 arg1) {                                         \
+        LSS_REG(0, arg1);                                                     \
+        LSS_BODY(type,name);                                                  \
+    }
+  #undef _syscall2
+  #define _syscall2(type,name,type1,arg1,type2,arg2)                          \
+    type LSS_NAME(name)(type1 arg1, type2 arg2) {                             \
+        LSS_REG(0,arg1);                                                      \
+        LSS_REG(1,arg2);                                                      \
+        LSS_BODY(type, name,"r"(__a1));                                       \
+    }
+  #undef _syscall3
+  #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)               \
+    type LSS_NAME(name)(type1 arg1,type2 arg2,type3 arg3) {                   \
+        LSS_REG(0,arg1);                                                      \
+        LSS_REG(1,arg2);                                                      \
+        LSS_REG(2,arg3);                                                      \
+        LSS_BODY(type, name,"r"(__a1), "r"(__a2));                            \
+    }
+  #undef _syscall4
+  #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,               \
+                    type4,arg4)                                               \
+    type LSS_NAME(name)(type1 arg1,type2 arg2,type3 arg3,type4 arg4) {        \
+        LSS_REG(0,arg1);                                                      \
+        LSS_REG(1,arg2);                                                      \
+        LSS_REG(2,arg3);                                                      \
+        LSS_REG(3,arg4);                                                      \
+        LSS_BODY(type,name, "r"(__a1), "r"(__a2), "r"(__a3));                 \
+    }
+  #undef _syscall5
+  #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,               \
+                    type4,arg4,type5,arg5)                                    \
+    type LSS_NAME(name)(type1 arg1,type2 arg2,type3 arg3,type4 arg4,          \
+                    type5 arg5) {                                             \
+        LSS_REG(0,arg1);                                                      \
+        LSS_REG(1,arg2);                                                      \
+        LSS_REG(2,arg3);                                                      \
+        LSS_REG(3,arg4);                                                      \
+        LSS_REG(4,arg5);                                                      \
+        LSS_BODY(type,name,"r"(__a1), "r"(__a2), "r"(__a3), "r"(__a4));       \
+    }
+  #undef _syscall6
+  #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,               \
+                    type4,arg4,type5,arg5,type6,arg6)                         \
+    type LSS_NAME(name)(type1 arg1,type2 arg2,type3 arg3,type4 arg4,          \
+                    type5 arg5,type6 arg6) {                                  \
+        LSS_REG(0,arg1);                                                      \
+        LSS_REG(1,arg2);                                                      \
+        LSS_REG(2,arg3);                                                      \
+        LSS_REG(3,arg4);                                                      \
+        LSS_REG(4,arg5);                                                      \
+        LSS_REG(5,arg6);                                                      \
+        LSS_BODY(type,name,"r"(__a1), "r"(__a2), "r"(__a3), "r"(__a4),        \
+                            "r"(__a5));                                       \
+    }
+
+  LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *arg), void *child_stack,
+                                 int flags, void *arg, int *parent_tidptr,
+                                 void *newtls, int *child_tidptr) {
+   long __res;
+    {
+      register int (*__fn)(void *) __asm__("$a0") = fn;
+      register void *__stack __asm__("$a1") = child_stack;
+      register int   __flags __asm__("$a2") = flags;
+      register void *__arg   __asm__("$a3") = arg;
+      register int  *__ptid  __asm__("$a4") = parent_tidptr;
+      register void *__tls   __asm__("$a5") = newtls;
+      register int  *__ctid  __asm__("$a6") = child_tidptr;
+      __asm__ __volatile__(
+                           /* Align stack to 16 or 8 bytes per the ABI.  */
+		           "bstrins.d       $a1,$zero, 3, 0\n"
+                           /*Sanity check arguments     */
+                           "beqz        $a0, 1f\n"   /* No NULL function pointers.  */
+                           "beqz        $a1, 1f\n"   /* No NULL stack pointers.  */
+
+                           /*Save argument pointer      */
+                           "addi.d      $a1, $a1, -16\n"  /* Reserve argument save space.  */
+                           "st.d        $a0, $a1, 0\n"    /* Save function pointer.  */
+                           "st.d        $a3, $a1, 8\n"    /* Save argument pointer.  */
+
+
+                           /* The syscall expects the args to be in different slots.    */
+                           "or          $a0, $a2, $zero\n"
+                           "or          $a2, $a4, $zero\n"
+                           "or          $a3, $a6, $zero\n"
+                           "or          $a4, $a5, $zero\n"
+                           /* Do the system call        */
+                           "li.d        $a7, %9\n"
+                           "syscall     0\n"
+
+                           /* $a0 is 0 in the child; parent (pid) or error (-errno) exits. */
+                           "bnez        $a0, 2f\n"
+
+			   "ld.d	$a1, $sp, 0\n"   /* Function pointer.  */
+			   "ld.d	$a0, $sp, 8\n"   /* Argument pointer.  */
+
+			   /* Call the user's function.  */
+			   "jirl	$ra, $a1, 0\n"
+
+			   /* Call exit with the function's return value.  */
+			   "li.d	$a7, %10\n"
+			   "syscall	0\n"
+
+                           "1:\n"
+			   "li.d        $t0, %1\n"
+			   "or          $a0, $t0, $zero\n"
+
+                           "2:\n"
+
+                           : "=r" (__res)
+                           : "i"(-EINVAL),
+                             "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg),
+                             "r"(__ptid), "r"(__tls), "r"(__ctid),
+                             "i"(__NR_clone), "i"(__NR_exit)
+                           : "memory");
+    }
+    LSS_RETURN(int, __res);
+  }
+
   #endif
   #define __NR__exit   __NR_exit
   #define __NR__gettid __NR_gettid
@@ -2657,7 +2852,7 @@ struct kernel_stat {
     LSS_INLINE _syscall3(long, getcpu, unsigned *, cpu,
                          unsigned *, node, void *, unused);
   #endif
-  #if defined(__x86_64__) || defined(__aarch64__) || \
+  #if defined(__x86_64__) || defined(__aarch64__) || defined(__loongarch64) ||  \
      (defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI32)
     LSS_INLINE _syscall3(int, socket,             int,   d,
                          int,                     t, int,       p)
@@ -2691,6 +2886,7 @@ struct kernel_stat {
     }
   #endif
   #if (defined(__aarch64__)) || \
+      (defined(__loongarch64)) || \
       (defined(__mips__) \
        && (_MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32))
     LSS_INLINE int LSS_NAME(sigaction)(int signum,
diff --git a/src/base/linuxthreads.h b/src/base/linuxthreads.h
index a087628..3b488f2 100644
--- a/src/base/linuxthreads.h
+++ b/src/base/linuxthreads.h
@@ -42,7 +42,7 @@
  * related platforms should not be difficult.
  */
 #if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \
-     defined(__mips__) || defined(__PPC__) || defined(__aarch64__) ||       \
+     defined(__mips__) || defined(__PPC__) || defined(__aarch64__) || defined(__loongarch64) || \
      defined(__s390__)) && defined(__linux)
 
 /* Define the THREADS symbol to make sure that there is exactly one core dumper
diff --git a/src/debugallocation.cc b/src/debugallocation.cc
index 17cd452..b0f7509 100644
--- a/src/debugallocation.cc
+++ b/src/debugallocation.cc
@@ -112,6 +112,9 @@ DEFINE_bool(malloc_page_fence_never_reclaim,
             EnvToBool("TCMALLOC_PAGE_FENCE_NEVER_RECLAIM", false),
             "Enables making the virtual address space inaccessible "
             "upon a deallocation instead of returning it and reusing later.");
+DEFINE_bool(malloc_page_fence_readable,
+            EnvToBool("TCMALLOC_PAGE_FENCE_READABLE", false),
+            "Permits reads to the page fence.");
 #else
 DEFINE_bool(malloc_page_fence, false, "Not usable (requires mmap)");
 DEFINE_bool(malloc_page_fence_never_reclaim, false, "Not usable (required mmap)");
@@ -508,6 +511,7 @@ class MallocBlock {
     }
     MallocBlock* b = NULL;
     const bool use_malloc_page_fence = FLAGS_malloc_page_fence;
+    const bool malloc_page_fence_readable = FLAGS_malloc_page_fence_readable;
 #ifdef HAVE_MMAP
     if (use_malloc_page_fence) {
       // Put the block towards the end of the page and make the next page
@@ -526,7 +530,8 @@ class MallocBlock {
                 strerror(errno));
       }
       // Mark the page after the block inaccessible
-      if (mprotect(p + (num_pages - 1) * pagesize, pagesize, PROT_NONE)) {
+      if (mprotect(p + (num_pages - 1) * pagesize, pagesize,
+                   PROT_NONE|(malloc_page_fence_readable ? PROT_READ : 0))) {
         RAW_LOG(FATAL, "Guard page setup failed: %s", strerror(errno));
       }
       b = (MallocBlock*) (p + (num_pages - 1) * pagesize - sz);
diff --git a/src/libc_override_osx.h b/src/libc_override_osx.h
index 9d5d611..a4c1dde 100644
--- a/src/libc_override_osx.h
+++ b/src/libc_override_osx.h
@@ -129,6 +129,10 @@ void mz_free(malloc_zone_t* zone, void* ptr) {
   return tc_free(ptr);
 }
 
+void mz_free_definite_size(malloc_zone_t* zone, void *ptr, size_t size) {
+  return tc_free(ptr);
+}
+
 void* mz_realloc(malloc_zone_t* zone, void* ptr, size_t size) {
   return tc_realloc(ptr, size);
 }
@@ -272,7 +276,7 @@ static void ReplaceSystemAlloc() {
     MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
   // Switch to version 6 on OSX 10.6 to support memalign.
   tcmalloc_zone.version = 6;
-  tcmalloc_zone.free_definite_size = NULL;
+  tcmalloc_zone.free_definite_size = &mz_free_definite_size;
   tcmalloc_zone.memalign = &mz_memalign;
   tcmalloc_introspection.zone_locked = &mi_zone_locked;
 
diff --git a/src/malloc_hook_mmap_linux.h b/src/malloc_hook_mmap_linux.h
index cbf3782..4362fe0 100644
--- a/src/malloc_hook_mmap_linux.h
+++ b/src/malloc_hook_mmap_linux.h
@@ -54,6 +54,7 @@
 #if defined(__x86_64__) \
     || defined(__PPC64__) \
     || defined(__aarch64__) \
+    || defined(__loongarch64) \
     || (defined(_MIPS_SIM) && (_MIPS_SIM == _ABI64 || _MIPS_SIM == _ABIN32)) \
     || defined(__s390__) || (defined(__riscv) && __riscv_xlen == 64) \
     || defined(__e2k__)
diff --git a/src/stacktrace.cc b/src/stacktrace.cc
index 2a2c648..32b16fa 100644
--- a/src/stacktrace.cc
+++ b/src/stacktrace.cc
@@ -219,7 +219,7 @@ static GetStackImplementation *all_impls[] = {
 
 // ppc and i386 implementations prefer arch-specific asm implementations.
 // arm's asm implementation is broken
-#if defined(__i386__) || defined(__ppc__) || defined(__PPC__)
+#if defined(__i386__) || defined(__ppc__) || defined(__PPC__) || defined(__loongarch64)
 #if !defined(NO_FRAME_POINTER)
 #define TCMALLOC_DONT_PREFER_LIBUNWIND
 #endif
diff --git a/src/system-alloc.cc b/src/system-alloc.cc
index e84a5f1..439ec69 100644
--- a/src/system-alloc.cc
+++ b/src/system-alloc.cc
@@ -511,7 +511,7 @@ void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size,
 }
 
 bool TCMalloc_SystemRelease(void* start, size_t length) {
-#ifdef MADV_FREE
+#if (defined(FREE_MMAP_PROT_NONE) && defined(HAVE_MMAP)) || defined(MADV_FREE)
   if (FLAGS_malloc_devmem_start) {
     // It's not safe to use MADV_FREE/MADV_DONTNEED if we've been
     // mapping /dev/mem for heap memory.
@@ -536,20 +536,40 @@ bool TCMalloc_SystemRelease(void* start, size_t length) {
   ASSERT(new_end <= end);
 
   if (new_end > new_start) {
-    int result;
+    bool result, retry;
     do {
-      result = madvise(reinterpret_cast<char*>(new_start),
+#if defined(FREE_MMAP_PROT_NONE) && defined(HAVE_MMAP)
+      // mmap PROT_NONE is similar to munmap by freeing backing pages by
+      // physical memory except using MAP_FIXED keeps virtual memory range
+      // reserved to be remapped back later
+      void* ret = mmap(reinterpret_cast<char*>(new_start), new_end - new_start,
+          PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
+
+      result = ret != MAP_FAILED;
+#else
+      int ret = madvise(reinterpret_cast<char*>(new_start),
           new_end - new_start, MADV_FREE);
-    } while (result == -1 && errno == EAGAIN);
 
-    return result != -1;
-  }
+      result = ret != -1;
 #endif
+      retry = errno == EAGAIN;
+    } while (!result && retry);
+
+    return result;
+  }
+#endif
   return false;
 }
 
 void TCMalloc_SystemCommit(void* start, size_t length) {
+#if defined(FREE_MMAP_PROT_NONE) && defined(HAVE_MMAP)
+  // remapping as MAP_FIXED to the same address, assuming span size did not change
+  // since last TCMalloc_SystemRelease
+  mmap(start, length, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED,
+       -1, 0);
+#else
   // Nothing to do here.  TCMalloc_SystemRelease does not alter pages
   // such that they need to be re-committed before they can be used by the
   // application.
+#endif
 }
diff --git a/src/tcmalloc.cc b/src/tcmalloc.cc
index 9ec663e..b9c5408 100644
--- a/src/tcmalloc.cc
+++ b/src/tcmalloc.cc
@@ -178,7 +178,7 @@ DECLARE_int64(tcmalloc_heap_limit_mb);
 // jump. I am not able to reproduce that anymore.
 #if !defined(__i386__) && !defined(__x86_64__) && \
     !defined(__ppc__) && !defined(__PPC__) && \
-    !defined(__aarch64__) && !defined(__mips__) && !defined(__arm__)
+    !defined(__aarch64__) && !defined(__mips__) && !defined(__arm__) && !defined(__loongarch64)
 #undef TCMALLOC_NO_ALIASES
 #define TCMALLOC_NO_ALIASES
 #endif