-rw-r--r--  extra/sbcl/PKGBUILD              7
-rw-r--r--  extra/sbcl/x86-linux-os.patch  112
2 files changed, 119 insertions, 0 deletions
diff --git a/extra/sbcl/PKGBUILD b/extra/sbcl/PKGBUILD
new file mode 100644
index 00000000..c2feda43
--- /dev/null
+++ b/extra/sbcl/PKGBUILD
@@ -0,0 +1,7 @@
+source+=('x86-linux-os.patch')
+sha256sums+=('3ba738c461a8836171b424b5881ad5aaa63071f54bc5d207e72cae4c1be3738a')
+
+prepare() {
+ cd "${srcdir}/${pkgname}-${pkgver}"
+ patch -Np1 -i ../x86-linux-os.patch
+}
diff --git a/extra/sbcl/x86-linux-os.patch b/extra/sbcl/x86-linux-os.patch
new file mode 100644
index 00000000..c1fd12b7
--- /dev/null
+++ b/extra/sbcl/x86-linux-os.patch
@@ -0,0 +1,112 @@
+--- sbcl-1.4.10/src/runtime/x86-linux-os.c.orig 2018-09-07 03:07:10.927918139 +0000
++++ sbcl-1.4.10/src/runtime/x86-linux-os.c 2018-09-07 03:05:10.471896471 +0000
+@@ -45,73 +45,43 @@
+ #include <linux/version.h>
+ #include "thread.h" /* dynamic_values_bytes */
+
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+-#define user_desc modify_ldt_ldt_s
+-#endif
+-
+-static inline int modify_ldt(int func, void *ptr, unsigned long bytecount)
++static inline int set_thread_area(struct user_desc *u_info)
+ {
+- return syscall(SYS_modify_ldt, func, ptr, bytecount);
++ return syscall(SYS_set_thread_area, u_info);
+ }
+
+ #include "validate.h"
+ size_t os_vm_page_size;
+
+-u32 local_ldt_copy[LDT_ENTRIES*LDT_ENTRY_SIZE/sizeof(u32)];
+-
+-/* This is never actually called, but it's great for calling from gdb when
+- * users have thread-related problems that maintainers can't duplicate */
+-
+-void debug_get_ldt()
+-{
+- int n=modify_ldt (0, local_ldt_copy, sizeof local_ldt_copy);
+- printf("%d bytes in ldt: print/x local_ldt_copy\n", n);
+-}
+-
+-#ifdef LISP_FEATURE_SB_THREAD
+-pthread_mutex_t modify_ldt_lock = PTHREAD_MUTEX_INITIALIZER;
+-#endif
+-
+ int arch_os_thread_init(struct thread *thread) {
+ stack_t sigstack;
+ #ifdef LISP_FEATURE_SB_THREAD
+- struct user_desc ldt_entry = {
+- 1, 0, 0, /* index, address, length filled in later */
+- 1, MODIFY_LDT_CONTENTS_DATA, 0, 0, 0, 1
++ struct user_desc desc = {
++ 0, (unsigned long) thread, dynamic_values_bytes,
++ 1, 0, 0, 1, 0, 1
+ };
+- int n;
+- thread_mutex_lock(&modify_ldt_lock);
+- n=modify_ldt(0,local_ldt_copy,sizeof local_ldt_copy);
+- /* get next free ldt entry */
+-
+- if(n) {
+- u32 *p;
+- for(n=0,p=local_ldt_copy;*p;p+=LDT_ENTRY_SIZE/sizeof(u32))
+- n++;
+- }
+- ldt_entry.entry_number=n;
+- ldt_entry.base_addr=(unsigned long) thread;
+- ldt_entry.limit=dynamic_values_bytes;
+- ldt_entry.limit_in_pages=0;
+- if (modify_ldt (1, &ldt_entry, sizeof (ldt_entry)) != 0) {
+- thread_mutex_unlock(&modify_ldt_lock);
+- /* modify_ldt call failed: something magical is not happening */
++
++ static int entry_number = -1;
++
++ desc.entry_number = entry_number;
++
++ if (set_thread_area(&desc) != 0) {
+ return 0;
+ }
++
++ entry_number = desc.entry_number;
++
+ __asm__ __volatile__ ("movw %w0, %%fs" : : "q"
+- ((n << 3) /* selector number */
+- + (1 << 2) /* TI set = LDT */
+- + 3)); /* privilege level */
+- thread->tls_cookie=n;
+- pthread_mutex_unlock(&modify_ldt_lock);
++ ((entry_number << 3) + 3));
+
+- if(n<0) return 0;
++ if(entry_number < 0) return 0;
+ #ifdef LISP_FEATURE_GCC_TLS
+ current_thread = thread;
+ #else
+ pthread_setspecific(specials,thread);
+ #endif
+ #endif
++
+ #ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
+ /* Signal handlers are run on the control stack, so if it is exhausted
+ * we had better use an alternate stack for whatever signal tells us
+@@ -136,17 +106,7 @@
+ */
+
+ int arch_os_thread_cleanup(struct thread *thread) {
+- struct user_desc ldt_entry = {
+- 0, 0, 0,
+- 0, MODIFY_LDT_CONTENTS_DATA, 0, 0, 0, 0
+- };
+- int result;
+-
+- ldt_entry.entry_number=thread->tls_cookie;
+- thread_mutex_lock(&modify_ldt_lock);
+- result = modify_ldt(1, &ldt_entry, sizeof (ldt_entry));
+- thread_mutex_unlock(&modify_ldt_lock);
+- return result;
++ return 1;
+ }
+
+
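For reference, here is a minimal standalone sketch (not part of the commit) of the mechanism the patch switches to: set_thread_area() asks the kernel for a free GDT TLS slot, installs a descriptor covering the thread's data, and the resulting selector is loaded into %fs, replacing the older modify_ldt() LDT approach. It assumes a 32-bit x86 Linux toolchain (e.g. gcc -m32); names such as tls_value are purely illustrative.

/* Sketch only: GDT-based TLS setup via set_thread_area(), analogous to the
 * arch_os_thread_init() change above.  Build 32-bit (gcc -m32). */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>                    /* struct user_desc */

static int set_thread_area(struct user_desc *u_info)
{
    return syscall(SYS_set_thread_area, u_info);
}

int main(void)
{
    static int tls_value = 42;          /* data the new segment will cover */

    struct user_desc desc = {
        .entry_number   = (unsigned int) -1,  /* -1: kernel picks a free slot */
        .base_addr      = (unsigned long) &tls_value,
        .limit          = sizeof tls_value,
        .seg_32bit      = 1,
        .limit_in_pages = 0,
        .useable        = 1,
    };

    if (set_thread_area(&desc) != 0) {
        perror("set_thread_area");
        return 1;
    }

    /* Selector = (GDT entry << 3) | RPL 3; TI bit clear means GDT, not LDT. */
    __asm__ __volatile__ ("movw %w0, %%fs" : : "q" ((desc.entry_number << 3) + 3));

    printf("TLS descriptor installed in GDT entry %u\n", desc.entry_number);
    return 0;
}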