Codebase list iortcw / 7950bc0
Imported Upstream version 1.42b+20151119+dfsg1 Simon McVittie 8 years ago
32 changed file(s) with 2784 addition(s) and 193 deletion(s). Raw diff Collapse all Expand all
66 COMPILE_PLATFORM=$(shell uname|sed -e s/_.*//|tr '[:upper:]' '[:lower:]'|sed -e 's/\//_/g')
77
88 COMPILE_ARCH=$(shell uname -m | sed -e s/i.86/i386/ | sed -e 's/^arm.*/arm/')
9
10 ARM_VER_CHECK=$(shell uname -m)
911
1012 ifeq ($(COMPILE_PLATFORM),sunos)
1113 # Solaris uname and GNU uname differ
126128 export CROSS_COMPILING
127129
128130 ifndef VERSION
129 VERSION=1.42c
131 VERSION=1.42d
130132 endif
131133
132134 ifndef CLIENTBIN
214216 endif
215217
216218 ifndef USE_FREETYPE
217 USE_FREETYPE=0
219 USE_FREETYPE=1
218220 endif
219221
220222 ifndef USE_INTERNAL_LIBS
375377 ifeq ($(ARCH),x86_64)
376378 OPTIMIZEVM = -O3
377379 OPTIMIZE = $(OPTIMIZEVM) -ffast-math
378 HAVE_VM_COMPILED = true
379 else
380 endif
380381 ifeq ($(ARCH),x86)
381382 OPTIMIZEVM = -O3 -march=i586
382383 OPTIMIZE = $(OPTIMIZEVM) -ffast-math
383 HAVE_VM_COMPILED=true
384 else
384 endif
385385 ifeq ($(ARCH),ppc)
386386 BASE_CFLAGS += -maltivec
387 HAVE_VM_COMPILED=true
388387 endif
389388 ifeq ($(ARCH),ppc64)
390389 BASE_CFLAGS += -maltivec
391 HAVE_VM_COMPILED=true
392390 endif
393391 ifeq ($(ARCH),sparc)
394392 OPTIMIZE += -mtune=ultrasparc3 -mv8plus
395393 OPTIMIZEVM += -mtune=ultrasparc3 -mv8plus
396 HAVE_VM_COMPILED=true
394 endif
395 ifeq ($(ARCH),sparc64)
396 OPTIMIZE += -mtune=ultrasparc3 -mv8plus
397 OPTIMIZEVM += -mtune=ultrasparc3 -mv8plus
397398 endif
398399 ifeq ($(ARCH),alpha)
399400 # According to http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=410555
400401 # -ffast-math will cause the client to die with SIGFPE on Alpha
401402 OPTIMIZE = $(OPTIMIZEVM)
402 endif
403 endif
404403 endif
405404
406405 SHLIBEXT=so
463462 #############################################################################
464463
465464 ifeq ($(PLATFORM),darwin)
466 HAVE_VM_COMPILED=true
467465 LIBS = -framework Cocoa
468466 CLIENT_LIBS=
469467 RENDERER_LIBS=
643641 ifeq ($(ARCH),x86_64)
644642 OPTIMIZEVM = -O3
645643 OPTIMIZE = $(OPTIMIZEVM) -ffast-math
646 HAVE_VM_COMPILED = true
647644 FILE_ARCH=x64
648645 endif
649646 ifeq ($(ARCH),x86)
650647 OPTIMIZEVM = -O3 -march=i586
651648 OPTIMIZE = $(OPTIMIZEVM) -ffast-math
652 HAVE_VM_COMPILED = true
653649 endif
654650
655651 SHLIBEXT=dll
745741 -Wall -fno-strict-aliasing \
746742 -DUSE_ICON -DMAP_ANONYMOUS=MAP_ANON
747743 CLIENT_CFLAGS += $(SDL_CFLAGS)
748 HAVE_VM_COMPILED = true
749744
750745 OPTIMIZEVM = -O3
751746 OPTIMIZE = $(OPTIMIZEVM) -ffast-math
806801 ifeq ($(ARCH),x86_64)
807802 OPTIMIZEVM = -O3
808803 OPTIMIZE = $(OPTIMIZEVM) -ffast-math
809 HAVE_VM_COMPILED = true
810 else
804 endif
811805 ifeq ($(ARCH),x86)
812806 OPTIMIZEVM = -O3 -march=i586
813807 OPTIMIZE = $(OPTIMIZEVM) -ffast-math
814 HAVE_VM_COMPILED=true
815 else
808 endif
816809 ifeq ($(ARCH),ppc)
817810 BASE_CFLAGS += -maltivec
818 HAVE_VM_COMPILED=true
819811 endif
820812 ifeq ($(ARCH),ppc64)
821813 BASE_CFLAGS += -maltivec
822 HAVE_VM_COMPILED=true
814 endif
815 ifeq ($(ARCH),sparc)
816 OPTIMIZE += -mtune=ultrasparc3 -mv8plus
817 OPTIMIZEVM += -mtune=ultrasparc3 -mv8plus
823818 endif
824819 ifeq ($(ARCH),sparc64)
825820 OPTIMIZE += -mtune=ultrasparc3 -mv8plus
826821 OPTIMIZEVM += -mtune=ultrasparc3 -mv8plus
827 HAVE_VM_COMPILED=true
828822 endif
829823 ifeq ($(ARCH),alpha)
830824 # According to http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=410555
831825 # -ffast-math will cause the client to die with SIGFPE on Alpha
832826 OPTIMIZE = $(OPTIMIZEVM)
833 endif
834 endif
835827 endif
836828
837829 ifeq ($(USE_CURL),1)
881873
882874 BASE_CFLAGS = -Wall -fno-strict-aliasing
883875
884 ifeq ($(ARCH),x86)
885 HAVE_VM_COMPILED=true
886 endif
887
888876 BUILD_CLIENT = 0
889877 else # ifeq netbsd
890878
942930
943931 ifeq ($(ARCH),sparc)
944932 OPTIMIZEVM += -O3 -mtune=ultrasparc3 -mv8plus -mno-faster-structs
945 HAVE_VM_COMPILED=true
946 else
933 endif
947934 ifeq ($(ARCH),x86)
948935 OPTIMIZEVM += -march=i586
949 HAVE_VM_COMPILED=true
950936 BASE_CFLAGS += -m32
951937 CLIENT_CFLAGS += -I/usr/X11/include/NVIDIA
952938 CLIENT_LDFLAGS += -L/usr/X11/lib/NVIDIA -R/usr/X11/lib/NVIDIA
953 endif
954939 endif
955940
956941 OPTIMIZE = $(OPTIMIZEVM) -ffast-math
994979
995980 ifndef RANLIB
996981 RANLIB=ranlib
982 endif
983
984 ifndef HAVE_VM_COMPILED
985 HAVE_VM_COMPILED=false
986 endif
987
988 ifneq ($(findstring $(ARCH),x86 x86_64 ppc ppc64 sparc sparc64),)
989 HAVE_VM_COMPILED=true
990 endif
991
992 ifeq ($(ARM_VER_CHECK),armv7l)
993 HAVE_VM_COMPILED=true
997994 endif
998995
999996 ifneq ($(HAVE_VM_COMPILED),true)
13331330
13341331 NAKED_TARGETS=$(shell echo $(TARGETS) | sed -e "s!$(B)/!!g")
13351332
1336 print_list=@for i in $(1); \
1333 print_list=-@for i in $(1); \
13371334 do \
13381335 echo " $$i"; \
13391336 done
13551352 @echo " VERSION: $(VERSION)"
13561353 @echo " COMPILE_PLATFORM: $(COMPILE_PLATFORM)"
13571354 @echo " COMPILE_ARCH: $(COMPILE_ARCH)"
1355 @echo " HAVE_VM_COMPILED: $(HAVE_VM_COMPILED)"
13581356 @echo " CC: $(CC)"
13591357 @echo " CXX: $(CXX)"
13601358 ifdef MINGW
21452143
21462144 ifeq ($(HAVE_VM_COMPILED),true)
21472145 ifneq ($(findstring $(ARCH),x86 x86_64),)
2148 Q3OBJ += \
2149 $(B)/client/vm_x86.o
2146 Q3OBJ += $(B)/client/vm_x86.o
21502147 endif
21512148 ifneq ($(findstring $(ARCH),ppc ppc64),)
21522149 Q3OBJ += $(B)/client/vm_powerpc.o $(B)/client/vm_powerpc_asm.o
21532150 endif
2154 ifeq ($(ARCH),sparc)
2151 ifneq ($(findstring $(ARCH),sparc sparc64),)
21552152 Q3OBJ += $(B)/client/vm_sparc.o
2153 endif
2154 ifeq ($(ARM_VER_CHECK),armv7l)
2155 Q3OBJ += $(B)/client/vm_armv7l.o
21562156 endif
21572157 endif
21582158
23182318
23192319 ifeq ($(HAVE_VM_COMPILED),true)
23202320 ifneq ($(findstring $(ARCH),x86 x86_64),)
2321 Q3DOBJ += \
2322 $(B)/ded/vm_x86.o
2321 Q3DOBJ += $(B)/ded/vm_x86.o
23232322 endif
23242323 ifneq ($(findstring $(ARCH),ppc ppc64),)
23252324 Q3DOBJ += $(B)/ded/vm_powerpc.o $(B)/ded/vm_powerpc_asm.o
23262325 endif
2327 ifeq ($(ARCH),sparc)
2326 ifneq ($(findstring $(ARCH),sparc sparc64),)
23282327 Q3DOBJ += $(B)/ded/vm_sparc.o
2328 endif
2329 ifeq ($(ARM_VER_CHECK),armv7l)
2330 Q3DOBJ += $(B)/ded/vm_armv7l.o
23292331 endif
23302332 endif
23312333
19311931 char message[MAX_RCON_MESSAGE];
19321932 netadr_t to;
19331933
1934 if ( !rcon_client_password->string ) {
1934 if ( !rcon_client_password->string[0] ) {
19351935 Com_Printf ("You must set 'rconpassword' before\n"
19361936 "issuing an rcon command.\n");
19371937 return;
23572357 void Com_ExecuteCfg(void)
23582358 {
23592359 Cbuf_ExecuteText(EXEC_NOW, "exec default.cfg\n");
2360 if ( FS_ReadFile( "language.cfg", NULL ) > 0 ) {
2361 Cbuf_ExecuteText(EXEC_APPEND, "exec language.cfg\n");
2362 } else if ( FS_ReadFile( "Language.cfg", NULL ) > 0 ) {
2363 Cbuf_ExecuteText(EXEC_APPEND, "exec Language.cfg\n");
2364 }
23602365 Cbuf_Execute(); // Always execute after exec to prevent text buffer overflowing
23612366
23622367 if(!Com_SafeMode())
723723 Q_strncpyz( fsh[f].name, filename, sizeof( fsh[f].name ) );
724724
725725 // don't let sound stutter
726 S_ClearSoundBuffer();
726 // S_ClearSoundBuffer();
727727
728728 // search homepath
729729 ospath = FS_BuildOSPath( fs_homepath->string, filename, "" );
800800 }
801801
802802 // don't let sound stutter
803 S_ClearSoundBuffer();
803 // S_ClearSoundBuffer();
804804
805805 from_ospath = FS_BuildOSPath( fs_homepath->string, from, "" );
806806 to_ospath = FS_BuildOSPath( fs_homepath->string, to, "" );
835835 }
836836
837837 // don't let sound stutter
838 S_ClearSoundBuffer();
838 // S_ClearSoundBuffer();
839839
840840 from_ospath = FS_BuildOSPath( fs_homepath->string, fs_gamedir, from );
841841 to_ospath = FS_BuildOSPath( fs_homepath->string, fs_gamedir, to );
943943 Q_strncpyz( fsh[f].name, filename, sizeof( fsh[f].name ) );
944944
945945 // don't let sound stutter
946 S_ClearSoundBuffer();
946 // S_ClearSoundBuffer();
947947
948948 ospath = FS_BuildOSPath( fs_homepath->string, fs_gamedir, filename );
949949
986986 Q_strncpyz( fsh[f].name, filename, sizeof( fsh[f].name ) );
987987
988988 // don't let sound stutter
989 S_ClearSoundBuffer();
989 // S_ClearSoundBuffer();
990990
991991 ospath = FS_BuildOSPath( fs_homepath->string, fs_gamedir, filename );
992992
7878 #define LEGACY_HEARTBEAT_FOR_MASTER "Wolfenstein-1"
7979
8080 #ifndef PRODUCT_VERSION
81 #define PRODUCT_VERSION "1.42c"
81 #define PRODUCT_VERSION "1.42d"
8282 #endif
8383
8484 #ifndef OLD_PRODUCT_VERSION
0 /*
1 ===========================================================================
2 Copyright (C) 2009 David S. Miller <davem@davemloft.net>
3 Copyright (C) 2013,2014 SUSE Linux Products GmbH
4
5 This file is part of Quake III Arena source code.
6
7 Quake III Arena source code is free software; you can redistribute it
8 and/or modify it under the terms of the GNU General Public License as
9 published by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 Quake III Arena source code is distributed in the hope that it will be
13 useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with Quake III Arena source code; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 ===========================================================================
21
22 ARMv7l VM by Ludwig Nussel <ludwig.nussel@suse.de>
23
24 TODO: optimization
25
26 Docu:
27 http://www.coranac.com/tonc/text/asm.htm
28 http://www.heyrick.co.uk/armwiki/Category:Opcodes
29 ARMv7-A_ARMv7-R_DDI0406_2007.pdf
30 */
31
32 #include <sys/types.h>
33 #include <sys/mman.h>
34 #include <sys/time.h>
35 #include <time.h>
36 #include <stddef.h>
37
38 #include <sys/types.h>
39 #include <sys/stat.h>
40 #include <fcntl.h>
41
42 #include "vm_local.h"
43 #define R0 0
44 #define R1 1
45 #define R2 2
46 #define R3 3
47 #define R4 4
48
49 #define R12 12
50
51 #define FP 11
52 #define SP 13
53 #define LR 14
54 #define PC 15
55
56 #define APSR_nzcv 15
57
58 #define S14 14
59 #define S15 15
60
61 #define rOPSTACK 5
62 #define rOPSTACKBASE 6
63 #define rCODEBASE 7
64 #define rPSTACK 8
65 #define rDATABASE 9
66 #define rDATAMASK 10
67
68 #define bit(x) (1<<x)
69
70 /* arm eabi, builtin gcc functions */
71 int __aeabi_idiv (int, int);
72 unsigned __aeabi_uidiv (unsigned, unsigned);
73 void __aeabi_idivmod(void);
74 void __aeabi_uidivmod(void);
75
76 /* exit() won't be called but use it because it is marked with noreturn */
77 #define DIE( reason, args... ) \
78 do { \
79 Com_Error(ERR_DROP, "vm_arm compiler error: " reason, ##args); \
80 exit(1); \
81 } while(0)
82
83 /*
84 * opcode information table:
85 * - length of immediate value
86 * - returned register type
87 * - required register(s) type
88 */
89 #define opImm0 0x0000 /* no immediate */
90 #define opImm1 0x0001 /* 1 byte immadiate value after opcode */
91 #define opImm4 0x0002 /* 4 bytes immediate value after opcode */
92
93 #define opRet0 0x0000 /* returns nothing */
94 #define opRetI 0x0004 /* returns integer */
95 #define opRetF 0x0008 /* returns float */
96 #define opRetIF (opRetI | opRetF) /* returns integer or float */
97
98 #define opArg0 0x0000 /* requires nothing */
99 #define opArgI 0x0010 /* requires integer(s) */
100 #define opArgF 0x0020 /* requires float(s) */
101 #define opArgIF (opArgI | opArgF) /* requires integer or float */
102
103 #define opArg2I 0x0040 /* requires second argument, integer */
104 #define opArg2F 0x0080 /* requires second argument, float */
105 #define opArg2IF (opArg2I | opArg2F) /* requires second argument, integer or float */
106
107 static const unsigned char vm_opInfo[256] =
108 {
109 [OP_UNDEF] = opImm0,
110 [OP_IGNORE] = opImm0,
111 [OP_BREAK] = opImm0,
112 [OP_ENTER] = opImm4,
113 /* OP_LEAVE has to accept floats, they will be converted to ints */
114 [OP_LEAVE] = opImm4 | opRet0 | opArgIF,
115 /* only STORE4 and POP use values from OP_CALL,
116 * no need to convert floats back */
117 [OP_CALL] = opImm0 | opRetI | opArgI,
118 [OP_PUSH] = opImm0 | opRetIF,
119 [OP_POP] = opImm0 | opRet0 | opArgIF,
120 [OP_CONST] = opImm4 | opRetIF,
121 [OP_LOCAL] = opImm4 | opRetI,
122 [OP_JUMP] = opImm0 | opRet0 | opArgI,
123
124 [OP_EQ] = opImm4 | opRet0 | opArgI | opArg2I,
125 [OP_NE] = opImm4 | opRet0 | opArgI | opArg2I,
126 [OP_LTI] = opImm4 | opRet0 | opArgI | opArg2I,
127 [OP_LEI] = opImm4 | opRet0 | opArgI | opArg2I,
128 [OP_GTI] = opImm4 | opRet0 | opArgI | opArg2I,
129 [OP_GEI] = opImm4 | opRet0 | opArgI | opArg2I,
130 [OP_LTU] = opImm4 | opRet0 | opArgI | opArg2I,
131 [OP_LEU] = opImm4 | opRet0 | opArgI | opArg2I,
132 [OP_GTU] = opImm4 | opRet0 | opArgI | opArg2I,
133 [OP_GEU] = opImm4 | opRet0 | opArgI | opArg2I,
134 [OP_EQF] = opImm4 | opRet0 | opArgF | opArg2F,
135 [OP_NEF] = opImm4 | opRet0 | opArgF | opArg2F,
136 [OP_LTF] = opImm4 | opRet0 | opArgF | opArg2F,
137 [OP_LEF] = opImm4 | opRet0 | opArgF | opArg2F,
138 [OP_GTF] = opImm4 | opRet0 | opArgF | opArg2F,
139 [OP_GEF] = opImm4 | opRet0 | opArgF | opArg2F,
140
141 [OP_LOAD1] = opImm0 | opRetI | opArgI,
142 [OP_LOAD2] = opImm0 | opRetI | opArgI,
143 [OP_LOAD4] = opImm0 | opRetIF| opArgI,
144 [OP_STORE1] = opImm0 | opRet0 | opArgI | opArg2I,
145 [OP_STORE2] = opImm0 | opRet0 | opArgI | opArg2I,
146 [OP_STORE4] = opImm0 | opRet0 | opArgIF| opArg2I,
147 [OP_ARG] = opImm1 | opRet0 | opArgIF,
148 [OP_BLOCK_COPY] = opImm4 | opRet0 | opArgI | opArg2I,
149
150 [OP_SEX8] = opImm0 | opRetI | opArgI,
151 [OP_SEX16] = opImm0 | opRetI | opArgI,
152 [OP_NEGI] = opImm0 | opRetI | opArgI,
153 [OP_ADD] = opImm0 | opRetI | opArgI | opArg2I,
154 [OP_SUB] = opImm0 | opRetI | opArgI | opArg2I,
155 [OP_DIVI] = opImm0 | opRetI | opArgI | opArg2I,
156 [OP_DIVU] = opImm0 | opRetI | opArgI | opArg2I,
157 [OP_MODI] = opImm0 | opRetI | opArgI | opArg2I,
158 [OP_MODU] = opImm0 | opRetI | opArgI | opArg2I,
159 [OP_MULI] = opImm0 | opRetI | opArgI | opArg2I,
160 [OP_MULU] = opImm0 | opRetI | opArgI | opArg2I,
161 [OP_BAND] = opImm0 | opRetI | opArgI | opArg2I,
162 [OP_BOR] = opImm0 | opRetI | opArgI | opArg2I,
163 [OP_BXOR] = opImm0 | opRetI | opArgI | opArg2I,
164 [OP_BCOM] = opImm0 | opRetI | opArgI,
165 [OP_LSH] = opImm0 | opRetI | opArgI | opArg2I,
166 [OP_RSHI] = opImm0 | opRetI | opArgI | opArg2I,
167 [OP_RSHU] = opImm0 | opRetI | opArgI | opArg2I,
168 [OP_NEGF] = opImm0 | opRetF | opArgF,
169 [OP_ADDF] = opImm0 | opRetF | opArgF | opArg2F,
170 [OP_SUBF] = opImm0 | opRetF | opArgF | opArg2F,
171 [OP_DIVF] = opImm0 | opRetF | opArgF | opArg2F,
172 [OP_MULF] = opImm0 | opRetF | opArgF | opArg2F,
173 [OP_CVIF] = opImm0 | opRetF | opArgI,
174 [OP_CVFI] = opImm0 | opRetI | opArgF,
175 };
176
177 #ifdef DEBUG_VM
178 static const char *opnames[256] = {
179 "OP_UNDEF", "OP_IGNORE", "OP_BREAK", "OP_ENTER", "OP_LEAVE", "OP_CALL",
180 "OP_PUSH", "OP_POP", "OP_CONST", "OP_LOCAL", "OP_JUMP",
181 "OP_EQ", "OP_NE", "OP_LTI", "OP_LEI", "OP_GTI", "OP_GEI",
182 "OP_LTU", "OP_LEU", "OP_GTU", "OP_GEU", "OP_EQF", "OP_NEF",
183 "OP_LTF", "OP_LEF", "OP_GTF", "OP_GEF",
184 "OP_LOAD1", "OP_LOAD2", "OP_LOAD4", "OP_STORE1", "OP_STORE2",
185 "OP_STORE4", "OP_ARG", "OP_BLOCK_COPY",
186 "OP_SEX8", "OP_SEX16",
187 "OP_NEGI", "OP_ADD", "OP_SUB", "OP_DIVI", "OP_DIVU",
188 "OP_MODI", "OP_MODU", "OP_MULI", "OP_MULU", "OP_BAND",
189 "OP_BOR", "OP_BXOR", "OP_BCOM", "OP_LSH", "OP_RSHI", "OP_RSHU",
190 "OP_NEGF", "OP_ADDF", "OP_SUBF", "OP_DIVF", "OP_MULF",
191 "OP_CVIF", "OP_CVFI",
192 };
193
194 #define NOTIMPL(x) \
195 do { Com_Error(ERR_DROP, "instruction not implemented: %s", opnames[x]); } while(0)
196 #else
197 #define NOTIMPL(x) \
198 do { Com_Printf(S_COLOR_RED "instruction not implemented: %x\n", x); vm->compiled = qfalse; return; } while(0)
199 #endif
200
201 static void VM_Destroy_Compiled(vm_t *vm)
202 {
203 if (vm->codeBase) {
204 if (munmap(vm->codeBase, vm->codeLength))
205 Com_Printf(S_COLOR_RED "Memory unmap failed, possible memory leak\n");
206 }
207 vm->codeBase = NULL;
208 }
209
210 /*
211 =================
212 ErrJump
213 Error handler for jump/call to invalid instruction number
214 =================
215 */
216
217 static void __attribute__((__noreturn__)) ErrJump(unsigned num)
218 {
219 Com_Error(ERR_DROP, "program tried to execute code outside VM (%x)", num);
220 }
221
222 static int asmcall(int call, int pstack)
223 {
224 // save currentVM so as to allow for recursive VM entry
225 vm_t *savedVM = currentVM;
226 int i, ret;
227
228 // modify VM stack pointer for recursive VM entry
229 currentVM->programStack = pstack - 4;
230
231 if (sizeof(intptr_t) == sizeof(int)) {
232 intptr_t *argPosition = (intptr_t *)((byte *)currentVM->dataBase + pstack + 4);
233 argPosition[0] = -1 - call;
234 ret = currentVM->systemCall(argPosition);
235 } else {
236 intptr_t args[MAX_VMSYSCALL_ARGS];
237
238 args[0] = -1 - call;
239 int *argPosition = (int *)((byte *)currentVM->dataBase + pstack + 4);
240 for( i = 1; i < ARRAY_LEN(args); i++ )
241 args[i] = argPosition[i];
242
243 ret = currentVM->systemCall(args);
244 }
245
246 currentVM = savedVM;
247
248 return ret;
249 }
250
251 void _emit(vm_t *vm, unsigned isn, int pass)
252 {
253 #if 0
254 static int fd = -2;
255 if (fd == -2)
256 fd = open("code.bin", O_TRUNC|O_WRONLY|O_CREAT, 0644);
257 if (fd > 0)
258 write(fd, &isn, 4);
259 #endif
260
261 if (pass)
262 memcpy(vm->codeBase+vm->codeLength, &isn, 4);
263 vm->codeLength+=4;
264 }
265
266 #define emit(isn) _emit(vm, isn, pass)
267
/*
 * Encode a byte offset as the 8-bit word offset used by VFP load/store
 * instructions. Aborts (DIE) when the offset is not word-aligned or
 * exceeds the encodable maximum of 1020 bytes.
 */
static unsigned char off8(unsigned val)
{
	if (val & 3)
		DIE("offset must be multiple of four");
	if (val > 1020)
		DIE("offset too large");
	return (unsigned char)(val >> 2);
}
276
/*
 * Encode a value as an ARM "modified immediate": an 8-bit constant plus
 * an even right-rotation. Aborts (DIE) when no encoding exists.
 */
static unsigned short rimm(unsigned val)
{
	unsigned rot;

	if (val < 256)
		return val;

	/* rotate in two-bit steps until the value fits in eight bits */
	for (rot = 0; rot < 16 && (val > 255 || !(val & 3)); ++rot)
		val = (val & 3) << 30 | val >> 2;

	if (rot > 15 || val > 255) {
		DIE("immediate cannot be encoded (%d, %d)\n", rot, val);
	}
	return (16 - rot) << 8 | val;
}
293
/*
 * Like rimm() but reports failure instead of aborting: returns the
 * encoded immediate, or 0 when the value is not representable.
 * Must not be called with zero, since 0 is the failure sentinel.
 */
static unsigned short can_encode(unsigned val)
{
	unsigned rot = 0;

	if (!val)
		DIE("can_encode: invalid argument");
	if (val < 256)
		return val;

	/* rotate in two-bit steps until the value fits in eight bits */
	while (rot < 16 && (val > 255 || !(val & 3))) {
		val = (val & 3) << 30 | val >> 2;
		++rot;
	}

	if (rot > 15 || val > 255)
		return 0;
	return (16 - rot) << 8 | val;
}
312
313 #define PREINDEX (1<<24)
314
315 #define rASR(i, reg) (0b10<<5 | ((i&31)<<7) | reg)
316 #define rLSL(i, reg) (0b00<<5 | ((i&31)<<7) | reg)
317 #define rLSR(i, reg) (0b01<<5 | ((i&31)<<7) | reg)
318 #define rROR(i, reg) (0b11<<5 | ((i&31)<<7) | reg)
319
320 // conditions
321 #define EQ (0b0000<<28)
322 #define NE (0b0001<<28)
323 #define CS (0b0010<<28)
324 #define HS CS
325 #define CC (0b0011<<28)
326 #define LO CC
327 #define MI (0b0100<<28)
328 #define PL (0b0101<<28)
329 #define VS (0b0110<<28)
330 #define VC (0b0111<<28)
331 #define HI (0b1000<<28)
332 #define LS (0b1001<<28)
333 #define GE (0b1010<<28)
334 #define LT (0b1011<<28)
335 #define GT (0b1100<<28)
336 #define LE (0b1101<<28)
337 #define AL (0b1110<<28)
338 #define cond(what, op) (what | (op&~AL))
339
340 // XXX: v not correctly computed
341 #define BKPT(v) (AL | 0b10010<<20 | ((v&~0xF)<<4) | 0b0111<<4 | (v&0xF))
342
343 #define YIELD (0b110010<<20 | 0b1111<<12 | 1)
344 #define NOP cond(AL, YIELD)
345
346 // immediate value must fit in 0xFF!
347 #define ANDi(dst, src, i) (AL | (0b001<<25) | (0b00000<<20) | (src<<16) | (dst<<12) | rimm(i))
348 #define EORi(dst, src, i) (AL | (0b001<<25) | (0b00010<<20) | (src<<16) | (dst<<12) | rimm(i))
349 #define SUBi(dst, src, i) (AL | (0b001<<25) | (0b00100<<20) | (src<<16) | (dst<<12) | rimm(i))
350 #define RSBi(dst, src, i) (AL | (0b001<<25) | (0b00110<<20) | (src<<16) | (dst<<12) | rimm(i))
351 #define ADDi(dst, src, i) (AL | (0b001<<25) | (0b01000<<20) | (src<<16) | (dst<<12) | rimm(i))
352 #define ADCi(dst, src, i) (AL | (0b001<<25) | (0b01010<<20) | (src<<16) | (dst<<12) | rimm(i))
353 #define SBCi(dst, src, i) (AL | (0b001<<25) | (0b01100<<20) | (src<<16) | (dst<<12) | rimm(i))
354 #define RSCi(dst, src, i) (AL | (0b001<<25) | (0b01110<<20) | (src<<16) | (dst<<12) | rimm(i))
355
356 #define ORRi(dst, src, i) (AL | (0b001<<25) | (0b11000<<20) | (src<<16) | (dst<<12) | rimm(i))
357 #define MOVi(dst, i) (AL | (0b001<<25) | (0b11010<<20) | (dst<<12) | rimm(i))
358 #define BICi(dst, src, i) (AL | (0b001<<25) | (0b11100<<20) | (src<<16) | (dst<<12) | rimm(i))
359 #define MVNi(dst, i) (AL | (0b001<<25) | (0b11110<<20) | (dst<<12) | rimm(i))
360
361 #define MOVW(dst, i) (AL | (0b11<<24) | ((((i)>>12)&0xF)<<16) | (dst<<12) | ((i)&((1<<12)-1)))
362 #define MOVT(dst, i) (AL | (0b11<<24) | (0b0100<<20) | ((((i)>>12)&0xF)<<16) | (dst<<12) | ((i)&((1<<12)-1)))
363
364 #define TSTi( src, i) (AL | (0b001<<25) | (0b10001<<20) | (src<<16) | rimm(i))
365 #define TEQi( src, i) (AL | (0b001<<25) | (0b10011<<20) | (src<<16) | rimm(i))
366 #define CMPi( src, i) (AL | (0b001<<25) | (0b10101<<20) | (src<<16) | rimm(i))
367 #define CMNi( src, i) (AL | (0b001<<25) | (0b10111<<20) | (src<<16) | rimm(i))
368
369 #define ANDSi(dst, src, i) (ANDi(dst, src, i) | (1<<20))
370 #define EORSi(dst, src, i) (EORi(dst, src, i) | (1<<20))
371 #define SUBSi(dst, src, i) (SUBi(dst, src, i) | (1<<20))
372 #define RSBSi(dst, src, i) (RSBi(dst, src, i) | (1<<20))
373 #define ADDSi(dst, src, i) (ADDi(dst, src, i) | (1<<20))
374 #define ADCSi(dst, src, i) (ADCi(dst, src, i) | (1<<20))
375 #define SBCSi(dst, src, i) (SBCi(dst, src, i) | (1<<20))
376 #define RSCSi(dst, src, i) (RSCi(dst, src, i) | (1<<20))
377
378 #define ORRSi(dst, src, i) (ORRi(dst, src, i) | (1<<20))
379 #define MOVSi(dst, i) (MOVi(dst, i) | (1<<20))
380 #define BICSi(dst, src, i) (BICi(dst, src, i) | (1<<20))
/* Flag-setting MVN-with-immediate.
 * FIX: the original expansion was MVNi(dst, src, i) — it referenced an
 * undeclared `src` and passed three arguments to the two-parameter MVNi(),
 * so any expansion of MVNSi failed to compile (latent because unused). */
#define MVNSi(dst, i) (MVNi(dst, i) | (1<<20))
382
383 #define AND(dst, src, reg) (AL | (0b000<<25) | (0b00000<<20) | (src<<16) | (dst<<12) | reg)
384 #define EOR(dst, src, reg) (AL | (0b000<<25) | (0b00010<<20) | (src<<16) | (dst<<12) | reg)
385 #define SUB(dst, src, reg) (AL | (0b000<<25) | (0b00100<<20) | (src<<16) | (dst<<12) | reg)
386 #define RSB(dst, src, reg) (AL | (0b000<<25) | (0b00110<<20) | (src<<16) | (dst<<12) | reg)
387 #define ADD(dst, src, reg) (AL | (0b000<<25) | (0b01000<<20) | (src<<16) | (dst<<12) | reg)
388 #define ADC(dst, src, reg) (AL | (0b000<<25) | (0b01010<<20) | (src<<16) | (dst<<12) | reg)
389 #define SBC(dst, src, reg) (AL | (0b000<<25) | (0b01100<<20) | (src<<16) | (dst<<12) | reg)
390 #define RSC(dst, src, reg) (AL | (0b000<<25) | (0b01110<<20) | (src<<16) | (dst<<12) | reg)
391
392 #define ORR(dst, src, reg) (AL | (0b000<<25) | (0b11000<<20) | (src<<16) | (dst<<12) | reg)
393 #define MOV(dst, src) (AL | (0b000<<25) | (0b11010<<20) | (dst<<12) | src)
394
395 #define LSL(dst, src, reg) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | (reg<<8) | (0b0001<<4) | src)
396 #define LSR(dst, src, reg) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | (reg<<8) | (0b0011<<4) | src)
397 #define ASR(dst, src, reg) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | (reg<<8) | (0b0101<<4) | src)
398 #define ROR(dst, src, reg) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | (reg<<8) | (0b0111<<4) | src)
399
400 #define LSLi(dst, src, i) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | ((i&0x1F)<<7) | (0b000<<4) | src)
401 #define LSRi(dst, src, i) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | ((i&0x1F)<<7) | (0b010<<4) | src)
402 #define ASRi(dst, src, i) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | ((i&0x1F)<<7) | (0b100<<4) | src)
403 #define RORi(dst, src, i) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | ((i&0x1F)<<7) | (0b110<<4) | src)
404 #define RRX(dst, src) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | (0b110<<4) | src)
405
406 #define BIC(dst, src, reg) (AL | (0b000<<25) | (0b11100<<20) | (src<<16) | (dst<<12) | reg)
407 #define MVN(dst, reg) (AL | (0b000<<25) | (0b11110<<20) | (dst<<12) | reg)
408
409 #define TST( src, reg) (AL | (0b000<<25) | (0b10001<<20) | (src<<16) | reg)
410 #define TEQ( src, reg) (AL | (0b000<<25) | (0b10011<<20) | (src<<16) | reg)
411 #define CMP( src, reg) (AL | (0b000<<25) | (0b10101<<20) | (src<<16) | reg)
412 #define CMN( src, reg) (AL | (0b000<<25) | (0b10111<<20) | (src<<16) | reg)
413
414 #define LDRa(dst, base, off) (AL | (0b011<<25) | (0b1100<<21) | (1<<20) | base<<16 | dst<<12 | off)
415 #define LDRx(dst, base, off) (AL | (0b011<<25) | (0b1000<<21) | (1<<20) | base<<16 | dst<<12 | off)
416
417 #define LDRai(dst, base, off) (AL | (0b010<<25) | (0b1100<<21) | (1<<20) | base<<16 | dst<<12 | rimm(off))
418 #define LDRxi(dst, base, off) (AL | (0b010<<25) | (0b1000<<21) | (1<<20) | base<<16 | dst<<12 | rimm(off))
419 #define LDRxiw(dst, base, off) (AL | (0b010<<25) | (0b1001<<21) | (1<<20) | base<<16 | dst<<12 | rimm(off))
420
421 #define LDRTa(dst, base, off) (AL | (0b011<<25) | (0b0101<<21) | (1<<20) | base<<16 | dst<<12 | off)
422 #define LDRTx(dst, base, off) (AL | (0b011<<25) | (0b0001<<21) | (1<<20) | base<<16 | dst<<12 | off)
423 #define LDRTai(dst, base, off) (AL | (0b010<<25) | (0b0101<<21) | (1<<20) | base<<16 | dst<<12 | rimm(off))
424 #define LDRTxi(dst, base, off) (AL | (0b010<<25) | (0b0001<<21) | (1<<20) | base<<16 | dst<<12 | rimm(off))
425
426 #define LDRBa(dst, base, off) (AL | (0b011<<25) | (0b1110<<21) | (1<<20) | base<<16 | dst<<12 | off)
427 #define LDRSBai(dst, base, off) (AL | (0b000<<25) | (0b0110<<21) | (1<<20) | base<<16 | dst<<12 | ((off&0xF0)<<4)|0b1101<<4|(off&0x0F))
428 #define STRBa(dst, base, off) (AL | (0b011<<25) | (0b1110<<21) | (0<<20) | base<<16 | dst<<12 | off)
429
430 #define LDRHa(dst, base, off) (AL | (0b000<<25) | (0b1100<<21) | (1<<20) | base<<16 | dst<<12 | (0b1011<<4) | off)
431 #define LDRSHai(dst, base, off) (AL | (0b000<<25) | (0b1110<<21) | (1<<20) | base<<16 | dst<<12 | ((off&0xF0)<<4)|0b1111<<4|(off&0x0F))
432 #define STRHa(dst, base, off) (AL | (0b000<<25) | (0b1100<<21) | (0<<20) | base<<16 | dst<<12 | (0b1011<<4) | off)
433
434 #define STRa(dst, base, off) (AL | (0b011<<25) | (0b1100<<21) | (0<<20) | base<<16 | dst<<12 | off)
435 #define STRx(dst, base, off) (AL | (0b011<<25) | (0b1000<<21) | (0<<20) | base<<16 | dst<<12 | off)
436 #define STRai(dst, base, off) (AL | (0b010<<25) | (0b1100<<21) | (0<<20) | base<<16 | dst<<12 | rimm(off))
437 #define STRxi(dst, base, off) (AL | (0b010<<25) | (0b1000<<21) | (0<<20) | base<<16 | dst<<12 | rimm(off))
438 #define STRaiw(dst, base, off) (AL | (0b010<<25) | (0b1101<<21) | (0<<20) | base<<16 | dst<<12 | rimm(off))
439 #define STRxiw(dst, base, off) (AL | (0b010<<25) | (0b1001<<21) | (0<<20) | base<<16 | dst<<12 | rimm(off))
440
441 // load with post-increment
442 #define POP1(reg) (AL | (0b010<<25) | (0b0100<<21) | (1<<20) | SP<<16 | reg<<12 | reg)
443 // store with post-increment
444 #define PUSH1(reg) (AL | (0b010<<25) | (0b1001<<21) | (0<<20) | SP<<16 | reg<<12 | 4)
445
446 // branch to target address (for small jumps)
447 #define Bi(i) \
448 (AL | (0b10)<<26 | (1<<25) /*I*/ | (0<<24) /*L*/ | (i))
449 // call subroutine
450 #define BLi(i) \
451 (AL | (0b10)<<26 | (1<<25) /*I*/ | (1<<24) /*L*/ | (i))
452 // branch and exchange (register)
453 #define BX(reg) \
454 (AL | 0b00010010<<20 | 0b1111<<16 | 0b1111<<12 | 0b1111<<8| 0b0001<<4 | reg)
455 // call subroutine (register)
456 #define BLX(reg) \
457 (AL | 0b00010010<<20 | 0b1111<<16 | 0b1111<<12 | 0b1111<<8| 0b0011<<4 | reg)
458
459 #define PUSH(mask) (AL | (0b100100<<22) | (0b10<<20) | (0b1101<<16) | mask)
460 #define PUSH2(r1, r2) (AL | (0b100100<<22) | (0b10<<20) | (0b1101<<16) | 1<<r1 | 1<<r2)
461 //#define PUSH1(reg) STRxiw(SP, reg, 4)
462
463 #define POP(mask) (0xe8bd0000|mask)
464
465 #define STM(base, regs) \
466 (AL | 0b100<<25 | 0<<24/*P*/| 0<<24/*U*/| 0<<24/*S*/| 0<<24/*W*/ | (base<<16) | (regs&~(1<<16)))
467
468 // note: op1 and op2 must not be the same
469 #define MUL(op1, op2, op3) \
470 (AL | 0b0000000<<21 | (1<<20) /*S*/ | (op1<<16) | (op3<<8) | 0b1001<<4 | (op2))
471
472 // puts integer in R0
473 #define emit_MOVR0i(arg) emit_MOVRxi(R0, arg)
474
475 // puts integer arg in register reg
476 #define emit_MOVRxi(reg, arg) do { \
477 emit(MOVW(reg, (arg&0xFFFF))); \
478 if (arg > 0xFFFF) \
479 emit(MOVT(reg, (((arg>>16)&0xFFFF)))); \
480 } while(0)
481
482 // puts integer arg in register reg. adds nop if only one instr is needed to
483 // make size constant
484 #define emit_MOVRxi_or_NOP(reg, arg) do { \
485 emit(MOVW(reg, (arg&0xFFFF))); \
486 if (arg > 0xFFFF) \
487 emit(MOVT(reg, (((arg>>16)&0xFFFF)))); \
488 else \
489 emit(NOP); \
490 } while(0)
491
492 // arm core register -> single precision register
493 #define VMOVass(Vn, Rt) (AL|(0b1110<<24)|(0b000<<21)|(0<<20)| ((Vn>>1)<<16) | (Rt<<12) | (0b1010<<8) | ((Vn&1)<<7) | (1<<4))
494 // single precision register -> arm core register
495 #define VMOVssa(Rt, Vn) (AL|(0b1110<<24)|(0b000<<21)|(1<<20)| ((Vn>>1)<<16) | (Rt<<12) | (0b1010<<8) | ((Vn&1)<<7) | (1<<4))
496
497 #define _VCVT_F(Vd, Vm, opc2, op) \
498 (AL|(0b11101<<23)|((Vd&1)<<22)|(0b111<<19)|(opc2<<16)|((Vd>>1)<<12)|(0b101<<9)|(0<<8)|(op<<7)|(1<<6)|((Vm&1)<<5)|(Vm>>1))
499 #define VCVT_F32_U32(Sd, Sm) _VCVT_F(Sd, Sm, 0b000, 0 /* unsigned */)
500 #define VCVT_U32_F32(Sd, Sm) _VCVT_F(Sd, Sm, 0b100, 1 /* round zero */)
501 #define VCVT_F32_S32(Sd, Sm) _VCVT_F(Sd, Sm, 0b000, 1 /* unsigned */)
502 #define VCVT_S32_F32(Sd, Sm) _VCVT_F(Sd, Sm, 0b101, 1 /* round zero */)
503
// --- VFP single-precision load/store encodings ---
// Sd register number is split: bit0 -> D bit (bit 22), bits[4:1] -> Vd field.
// off8() encodes the byte offset as an 8-bit word offset with U bit.
504 #define VLDRa(Vd, Rn, i) (AL|(0b1101<<24)|1<<23|((Vd&1)<<22)|1<<20|(Rn<<16)|((Vd>>1)<<12)|(0b1010<<8)|off8(i))
505 #define VSTRa(Vd, Rn, i) (AL|(0b1101<<24)|1<<23|((Vd&1)<<22)|0<<20|(Rn<<16)|((Vd>>1)<<12)|(0b1010<<8)|off8(i))
506
// --- VFP single-precision data-processing encodings (Vd = Vn op Vm) ---
507 #define VNEG_F32(Vd, Vm) \
508 (AL|(0b11101<<23)|((Vd&1)<<22)|(0b11<<20)|(1<<16)|((Vd>>1)<<12)|(0b101<<9)|(0<<8)|(1<<6)|((Vm&1)<<5)|(Vm>>1))
509
510 #define VADD_F32(Vd, Vn, Vm) \
511 (AL|(0b11100<<23)|((Vd&1)<<22)|(0b11<<20)|((Vn>>1)<<16)|((Vd>>1)<<12)|(0b101<<9)|(0<<8)|((Vn&1)<<7)|(0<<6)|((Vm&1)<<5)|(Vm>>1))
512 #define VSUB_F32(Vd, Vn, Vm) \
513 (AL|(0b11100<<23)|((Vd&1)<<22)|(0b11<<20)|((Vn>>1)<<16)|((Vd>>1)<<12)|(0b101<<9)|(0<<8)|((Vn&1)<<7)|(1<<6)|((Vm&1)<<5)|(Vm>>1))
514 #define VMUL_F32(Vd, Vn, Vm) \
515 (AL|(0b11100<<23)|((Vd&1)<<22)|(0b10<<20)|((Vn>>1)<<16)|((Vd>>1)<<12)|(0b101)<<9|(0<<8)|((Vn&1)<<7)|(0<<6)|((Vm&1)<<5)|(Vm>>1))
516 #define VDIV_F32(Vd, Vn, Vm) \
517 (AL|(0b11101<<23)|((Vd&1)<<22)|(0b00<<20)|((Vn>>1)<<16)|((Vd>>1)<<12)|(0b101<<9)|(0<<8)|((Vn&1)<<7)|(0<<6)|((Vm&1)<<5)|(Vm>>1))
518
// VCMP.F32 Vd, Vm (E selects VCMPE, the signalling-NaN variant)
519 #define _VCMP_F32(Vd, Vm, E) \
520 (AL|(0b11101<<23)|((Vd&1)<<22)|(0b11<<20)|((0b0100)<<16)|((Vd>>1)<<12)|(0b101<<9)|(0<<8)|(E<<7)|(1<<6)|((Vm&1)<<5)|(Vm>>1))
521 #define VCMP_F32(Vd, Vm) _VCMP_F32(Vd, Vm, 0)
522
// VMRS Rt, FPSCR — copies the VFP status flags into Rt (APSR_nzcv transfers
// them straight to the integer condition flags, so a cond() branch can follow).
523 #define VMRS(Rt) \
524 (AL|(0b11101111<<20)|(0b0001<<16)|(Rt<<12)|(0b1010<<8)|(1<<4))
525
526 // check if instruction in R0 is within range. Clobbers R1, R12
// Two-pass trick: the forward-branch distance (bytes_to_skip) and the branch
// origin are unknown on pass 0, so they are cached in function-local statics
// and only become valid on pass 1. ErrJump aborts on an out-of-range target.
527 #define CHECK_JUMP do { \
528 static int bytes_to_skip = -1; \
529 static unsigned branch = -1; \
530 emit_MOVRxi(R1, (unsigned)vm->instructionCount); \
531 emit(CMP(R0, R1)); \
532 if (branch == -1) \
533 branch = vm->codeLength; \
534 emit(cond(LT, Bi(j_rel(bytes_to_skip)))); \
535 emit_MOVRxi_or_NOP(R12, (unsigned)ErrJump); \
536 emit(BLX(R12)); \
537 if (bytes_to_skip == -1) \
538 bytes_to_skip = vm->codeLength - branch; \
539 } while(0)
540
541 //#define CONST_OPTIMIZE
// NOTE(review): the CONST_OPTIMIZE body below still contains x86 assembly
// text ("movl ... %%r9") and cannot work on ARM as-is; it is dead code while
// CONST_OPTIMIZE stays undefined.
542 #ifdef CONST_OPTIMIZE
543 #define MAYBE_EMIT_CONST() \
544 if (got_const) \
545 { \
546 got_const = 0; \
547 vm->instructionPointers[instruction-1] = assembler_get_code_size(); \
548 STACK_PUSH(4); \
549 emit("movl $%d, (%%r9, %%rbx, 4)", const_value); \
550 }
551 #else
552 #define MAYBE_EMIT_CONST()
553 #endif
554
555 // optimize: use load multiple
// Integer conditional jump: pop two operands, compare, and branch to the
// bytecode target arg.i (translated via instructionPointers) when the ARM
// condition 'comparator' holds. CHECK_JUMP validates the target first.
556 #define IJ(comparator) do { \
557 MAYBE_EMIT_CONST(); \
558 emit_MOVRxi(R0, arg.i); \
559 CHECK_JUMP; \
560 emit(LDRTxi(R0, rOPSTACK, 4)); \
561 emit(LDRTxi(R1, rOPSTACK, 4)); \
562 emit(CMP(R1, R0)); \
563 emit(cond(comparator, Bi(j_rel(vm->instructionPointers[arg.i]-vm->codeLength)))); \
564 } while (0)
565
// Float conditional jump: same idea via VCMP/VMRS on the top two floats.
566 #define FJ(comparator) do { \
567 emit_MOVRxi(R0, arg.i); \
568 CHECK_JUMP; \
569 emit(SUBi(rOPSTACK, rOPSTACK, 8)); \
570 emit(VLDRa(S15, rOPSTACK, 4)); \
571 emit(VLDRa(S14, rOPSTACK, 8)); \
572 emit(VCMP_F32(S15, S14)); \
573 emit(VMRS(APSR_nzcv)); \
574 emit(cond(comparator, Bi(j_rel(vm->instructionPointers[arg.i]-vm->codeLength)))); \
575 } while (0)
576
// Debug helper: call a routine whose address is in 'reg', preserving R3.
577 #define printreg(reg) emit(PUSH1(R3)); emit(BLX(reg)); emit(POP1(R3));
578
/*
 * _j_rel
 *
 * Encode a byte displacement 'x' as the 24-bit signed word offset used by
 * ARM B/BL instructions.  The displacement must be word aligned; two words
 * are subtracted to compensate for the ARM pipeline (PC reads 8 bytes
 * ahead).  The result must fit in 24 bits, i.e. the top byte must be all
 * zeros (forward) or all ones (backward, which is then masked off).
 * On any violation DIE() aborts with the offending value and bytecode pc;
 * DIE does not return.
 */
static inline unsigned _j_rel(int x, int pc)
{
	if ((x & 3) != 0)
		goto err;

	x = (x >> 2) - 2;	/* words, adjusted for the 8-byte prefetch */

	if (x >= 0) {
		if ((x & (0xFF << 24)) == 0)
			return x;
	} else if ((x & (0xFF << 24)) == (0xFF << 24)) {
		return x & ~(0xFF << 24);
	}

err:
	DIE("jump %d out of range at %d", x, pc);
}
595
/*
 * VM_Compile
 *
 * Two-pass JIT: translate QVM bytecode in 'header' into ARMv7 machine code.
 * Pass 0 runs with no backing buffer just to measure code size and record
 * instruction offsets; pass 1 mmap()s a writable buffer and emits for real
 * (j_rel() only computes genuine branch offsets on pass 1).  The buffer is
 * then flipped to read+execute and the icache flushed.
 * Register convention (see emit helpers): rCODEBASE/rDATABASE/rDATAMASK/
 * rPSTACK/rOPSTACK are callee-saved ARM registers holding the VM state;
 * R0-R3/R12 are scratch.
 */
596 void VM_Compile(vm_t *vm, vmHeader_t *header)
597 {
598 unsigned char *code;
599 int i_count, pc = 0;
600 int pass;
601 int codeoffsets[1024];
602
603 #define j_rel(x) (pass?_j_rel(x, pc):0xBAD)
604 #define OFFSET(i) (pass?(j_rel(codeoffsets[i]-vm->codeLength)):(0xF000000F))
605 #define new_offset() (offsidx++)
606 #define get_offset(i) (codeoffsets[i])
607 #define save_offset(i) (codeoffsets[i] = vm->codeLength)
608 #define OFF_CODE 0
609 #define OFF_IMMEDIATES 1
610
611 vm->compiled = qfalse;
612
613 vm->codeBase = NULL;
614 vm->codeLength = 0;
615
616 for (pass = 0; pass < 2; ++pass) {
617
618 int offsidx = 0;
619
620 // const optimization
621 unsigned got_const = 0, const_value = 0;
622
// Pass 1: codeLength now holds the size measured by pass 0; allocate the
// executable buffer and restart emission from offset 0.
623 if(pass)
624 {
625 vm->codeBase = mmap(NULL, vm->codeLength, PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0);
626 if(vm->codeBase == MAP_FAILED)
627 Com_Error(ERR_FATAL, "VM_CompileARM: can't mmap memory");
628 vm->codeLength = 0;
629 }
630
// Prologue: entry is called as entry(vm, &programStack, opStack) — see
// VM_CallCompiled.  Load VM state from the vm_t in R0 into fixed registers.
631 //int (*entry)(vm_t*, int*, int*);
632 emit(PUSH((((1<<8)-1)<<4)|(1<<14))); // push R4-R11, LR
633 emit(SUBi(SP, SP, 12)); // align stack!
634 emit(LDRai(rCODEBASE, R0, offsetof(vm_t, codeBase)));
635 emit(LDRai(rDATABASE, R0, offsetof(vm_t, dataBase)));
636 emit(LDRai(rDATAMASK, R0, offsetof(vm_t, dataMask)));
637 emit(LDRai(rPSTACK, R1, 0));
638 emit(MOV(rOPSTACK, R2)); // TODO: reverse opstack to avoid writing to return address
639 emit(MOV(rOPSTACKBASE, rOPSTACK));
640
641 emit(BLi(OFFSET(OFF_CODE)));
642
643 // save return value in r0
644 emit(LDRTxi(R0, rOPSTACK, 4)); // r0 = *opstack; rOPSTACK -= 4
645
646 emit(ADDi(SP, SP, 12)); // align stack!
647 emit(POP((((1<<8)-1)<<4)|(1<<15))); // pop R4-R11, LR -> PC
648
649 /* save some immediates here */
650 emit(BKPT(0));
651 emit(BKPT(0));
652 save_offset(OFF_IMMEDIATES);
653 // emit((unsigned)whatever);
654 emit(BKPT(0));
655 emit(BKPT(0));
656
657 save_offset(OFF_CODE);
658 offsidx = OFF_IMMEDIATES+1;
659
659 // translate the bytecode stream instruction by instruction
660 code = (unsigned char *) header + header->codeOffset;
661 pc = 0;
662
663 for (i_count = 0; i_count < header->instructionCount; i_count++) {
664 union {
665 unsigned char b[4];
666 unsigned int i;
667 } arg;
668 unsigned char op = code[pc++];
669
670 vm->instructionPointers[i_count] = vm->codeLength;
671
// Decode the operand: 4-byte immediate, 1-byte immediate, or none.
672 if (vm_opInfo[op] & opImm4)
673 {
674 memcpy(arg.b, &code[pc], 4);
675 pc += 4;
676 #ifdef EXCESSIVE_DEBUG
677 Com_Printf("%d: instruction %d (%s %d), offset %d\n", pass, i_count, opnames[op], arg.i, vm->codeLength);
678 #endif
679 }
680 else if (vm_opInfo[op] & opImm1)
681 {
682 arg.b[0] = code[pc];
683 ++pc;
684 #ifdef EXCESSIVE_DEBUG
685 Com_Printf("%d: instruction %d (%s %hhd), offset %d\n", pass, i_count, opnames[op], arg.i, vm->codeLength);
686 #endif
687 }
688 else
689 {
690 #ifdef EXCESSIVE_DEBUG
691 Com_Printf("%d: instruction %d (%s), offset %d\n", pass, i_count, opnames[op], vm->codeLength);
692 #endif
693 }
694
695 // TODO: for debug only
696 //emit_MOVRxi(R4, i_count);
697
698 switch ( op )
699 {
700 case OP_UNDEF:
701 break;
702
703 case OP_IGNORE:
704 NOTIMPL(op);
705 break;
706
707 case OP_BREAK:
708 emit(BKPT(0));
709 break;
710
711 case OP_ENTER:
712 MAYBE_EMIT_CONST();
713 emit(PUSH1(LR));
714 emit(SUBi(SP, SP, 12)); // align stack
715 if (arg.i == 0 || can_encode(arg.i))
716 {
717 emit(SUBi(rPSTACK, rPSTACK, arg.i)); // pstack -= arg
718 }
719 else
720 {
721 emit_MOVR0i(arg.i);
722 emit(SUB(rPSTACK, rPSTACK, R0)); // pstack -= arg
723 }
724 break;
725
726 case OP_LEAVE:
727 if (arg.i == 0 || can_encode(arg.i))
728 {
729 emit(ADDi(rPSTACK, rPSTACK, arg.i)); // pstack += arg
730 }
731 else
732 {
733 emit_MOVR0i(arg.i);
734 emit(ADD(rPSTACK, rPSTACK, R0)); // pstack += arg
735 }
736 emit(ADDi(SP, SP, 12));
737 emit(0xe49df004); // pop pc
738 break;
739
740 case OP_CALL:
741 #if 0
742 // save next instruction
743 emit_MOVR0i(i_count);
744 emit(STRa(R0, rDATABASE, rPSTACK)); // dataBase[pstack] = r0
745 #endif
746 if (got_const) {
747 NOTIMPL(op);
748 } else {
// Negative instruction numbers are syscalls, dispatched through asmcall();
// non-negative ones are direct calls into compiled code.  The forward
// branch over the syscall path uses the same pass-0/pass-1 static-cache
// trick as CHECK_JUMP.
749 static int bytes_to_skip = -1;
750 static unsigned start_block = -1;
751 MAYBE_EMIT_CONST();
752 // get instruction nr from stack
753 emit(LDRTxi(R0, rOPSTACK, 4)); // r0 = *opstack; rOPSTACK -= 4
754 emit(CMPi(R0, 0)); // check if syscall
755 if (start_block == -1)
756 start_block = vm->codeLength;
757 emit(cond(LT, Bi(j_rel(bytes_to_skip))));
758 CHECK_JUMP;
759 emit_MOVRxi_or_NOP(R1, (unsigned)vm->instructionPointers);
760 emit(LDRa(R0, R1, rLSL(2, R0))); // r0 = ((int*)r1)[r0]
761 emit(ADD(R0, rCODEBASE, R0)); // r0 = codeBase+r0
762 emit(BLX(R0));
763 emit(Bi(j_rel(vm->instructionPointers[i_count+1]-vm->codeLength)));
764 if (bytes_to_skip == -1)
765 bytes_to_skip = vm->codeLength - start_block;
766 emit(MOV(R1, rPSTACK));
767 emit_MOVRxi(R12, (unsigned)asmcall);
768 emit(BLX(R12));
769 // store return value
770 emit(STRaiw(R0, rOPSTACK, 4)); // opstack+=4; *opstack = r0
771 }
772 break;
773
774 case OP_PUSH:
775 MAYBE_EMIT_CONST();
776 emit(ADDi(rOPSTACK, rOPSTACK, 4));
777 break;
778
779 case OP_POP:
780 MAYBE_EMIT_CONST();
781 emit(SUBi(rOPSTACK, rOPSTACK, 4));
782 break;
783
784 case OP_CONST:
785 MAYBE_EMIT_CONST();
786 emit_MOVR0i(arg.i);
787 emit(STRaiw(R0, rOPSTACK, 4)); // opstack+=4; *opstack = r0
788 break;
789
790 case OP_LOCAL:
791 MAYBE_EMIT_CONST();
792 if (arg.i == 0 || can_encode(arg.i))
793 {
794 emit(ADDi(R0, rPSTACK, arg.i)); // r0 = pstack+arg
795 }
796 else
797 {
798 emit_MOVR0i(arg.i);
799 emit(ADD(R0, rPSTACK, R0)); // r0 = pstack+arg
800 }
801 emit(STRaiw(R0, rOPSTACK, 4)); // opstack+=4; *opstack = r0
802 break;
803
804 case OP_JUMP:
805 if(got_const) {
806 NOTIMPL(op);
807 } else {
808 emit(LDRTxi(R0, rOPSTACK, 4)); // r0 = *opstack; rOPSTACK -= 4
809 CHECK_JUMP;
810 emit_MOVRxi(R1, (unsigned)vm->instructionPointers);
811 emit(LDRa(R0, R1, rLSL(2, R0))); // r0 = ((int*)r1)[r0]
812 emit(ADD(R0, rCODEBASE, R0)); // r0 = codeBase+r0
813 emit(BLX(R0));
814 }
815 break;
816
// conditional branches: integer (IJ) and float (FJ) comparisons
817 case OP_EQ:
818 IJ(EQ);
819 break;
820
821 case OP_NE:
822 IJ(NE);
823 break;
824
825 case OP_LTI:
826 IJ(LT);
827 break;
828
829 case OP_LEI:
830 IJ(LE);
831 break;
832
833 case OP_GTI:
834 IJ(GT);
835 break;
836
837 case OP_GEI:
838 IJ(GE);
839 break;
840
841 case OP_LTU:
842 IJ(LO);
843 break;
844
845 case OP_LEU:
846 IJ(LS);
847 break;
848
849 case OP_GTU:
850 IJ(HI);
851 break;
852
853 case OP_GEU:
854 IJ(HS);
855 break;
856
857 case OP_EQF:
858 FJ(EQ);
859 break;
860
861 case OP_NEF:
862 FJ(NE);
863 break;
864
865 case OP_LTF:
866 FJ(LT);
867 break;
868
869 case OP_LEF:
870 FJ(LE);
871 break;
872
873 case OP_GTF:
874 FJ(GT);
875 break;
876
877 case OP_GEF:
878 FJ(GE);
879 break;
880
// memory loads: address on opstack is masked with dataMask before use
881 case OP_LOAD1:
882 MAYBE_EMIT_CONST();
883 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
884 emit(AND(R0, rDATAMASK, R0)); // r0 = r0 & rDATAMASK
885 emit(LDRBa(R0, rDATABASE, R0)); // r0 = (unsigned char)dataBase[r0]
886 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
887 break;
888
889 case OP_LOAD2:
890 MAYBE_EMIT_CONST();
891 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
892 emit(AND(R0, rDATAMASK, R0)); // r0 = r0 & rDATAMASK
893 emit(LDRHa(R0, rDATABASE, R0)); // r0 = (unsigned short)dataBase[r0]
894 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
895 break;
896
897 case OP_LOAD4:
898 MAYBE_EMIT_CONST();
899 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
900 emit(AND(R0, rDATAMASK, R0)); // r0 = r0 & rDATAMASK
901 emit(LDRa(R0, rDATABASE, R0)); // r0 = dataBase[r0]
902 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
903 break;
904
905 case OP_STORE1:
906 MAYBE_EMIT_CONST();
907 emit(LDRTxi(R0, rOPSTACK, 4)); // r0 = *opstack; rOPSTACK -= 4
908 emit(LDRTxi(R1, rOPSTACK, 4)); // r1 = *opstack; rOPSTACK -= 4
909 emit(AND(R1, rDATAMASK, R1)); // r1 = r1 & rDATAMASK
910 emit(STRBa(R0, rDATABASE, R1)); // database[r1] = r0
911 break;
912
913 case OP_STORE2:
914 MAYBE_EMIT_CONST();
915 emit(LDRTxi(R0, rOPSTACK, 4)); // r0 = *opstack; rOPSTACK -= 4
916 emit(LDRTxi(R1, rOPSTACK, 4)); // r1 = *opstack; rOPSTACK -= 4
917 emit(AND(R1, rDATAMASK, R1)); // r1 = r1 & rDATAMASK
918 emit(STRHa(R0, rDATABASE, R1)); // database[r1] = r0
919 break;
920
921 case OP_STORE4:
922 MAYBE_EMIT_CONST();
923 // optimize: use load multiple
924 // value
925 emit(LDRTxi(R0, rOPSTACK, 4)); // r0 = *opstack; rOPSTACK -= 4
926 // pointer
927 emit(LDRTxi(R1, rOPSTACK, 4)); // r1 = *opstack; rOPSTACK -= 4
928 emit(AND(R1, rDATAMASK, R1)); // r1 = r1 & rDATAMASK
929 // store value at pointer
930 emit(STRa(R0, rDATABASE, R1)); // database[r1] = r0
931 break;
932
933 case OP_ARG:
934 MAYBE_EMIT_CONST();
935 emit(LDRTxi(R0, rOPSTACK, 4)); // r0 = *opstack; rOPSTACK -= 4
936 emit(ADDi(R1, rPSTACK, arg.b[0])); // r1 = programStack+arg
937 emit(AND(R1, rDATAMASK, R1)); // r1 = r1 & rDATAMASK
938 emit(STRa(R0, rDATABASE, R1)); // dataBase[r1] = r0
939 break;
940
941 case OP_BLOCK_COPY:
942 MAYBE_EMIT_CONST();
943 emit(LDRTxi(R1, rOPSTACK, 4)); // r1 = *opstack; rOPSTACK -= 4
944 emit(LDRTxi(R0, rOPSTACK, 4));
945 emit_MOVRxi(R2, arg.i);
946 emit_MOVRxi(R12, (unsigned)VM_BlockCopy);
947 emit(BLX(R12));
948 break;
949
950 case OP_SEX8:
951 MAYBE_EMIT_CONST();
952 emit(LDRSBai(R0, rOPSTACK, 0)); // sign extend *opstack
953 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
954 break;
955
956 case OP_SEX16:
957 MAYBE_EMIT_CONST();
958 emit(LDRSHai(R0, rOPSTACK, 0)); // sign extend *opstack
959 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
960 break;
961
962 case OP_NEGI:
963 MAYBE_EMIT_CONST();
964 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
965 emit(RSBi(R0, R0, 0)); // r0 = -r0
966 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
967 break;
968
969 case OP_ADD:
970 MAYBE_EMIT_CONST();
971 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
972 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
973 emit(ADD(R0, R1, R0)); // r0 = r1 + r0
974 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
975 break;
976
977 case OP_SUB:
978 MAYBE_EMIT_CONST();
979 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
980 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
981 emit(SUB(R0, R1, R0)); // r0 = r1 - r0
982 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
983 break;
984
985 case OP_DIVI:
986 case OP_DIVU:
987 MAYBE_EMIT_CONST();
988 emit(LDRai(R1, rOPSTACK, 0)); // r1 = *opstack
989 emit(LDRxiw(R0, rOPSTACK, 4)); // opstack-=4; r0 = *opstack
990 if ( op == OP_DIVI )
991 emit_MOVRxi(R12, (unsigned)__aeabi_idiv);
992 else
993 emit_MOVRxi(R12, (unsigned)__aeabi_uidiv);
994 emit(BLX(R12));
995 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
996 break;
997
998 case OP_MODI:
999 case OP_MODU:
1000 MAYBE_EMIT_CONST();
1001 emit(LDRai(R1, rOPSTACK, 0)); // r1 = *opstack
1002 emit(LDRxiw(R0, rOPSTACK, 4)); // opstack-=4; r0 = *opstack
1003 if ( op == OP_MODI )
1004 emit_MOVRxi(R12, (unsigned)__aeabi_idivmod);
1005 else
1006 emit_MOVRxi(R12, (unsigned)__aeabi_uidivmod);
1007 emit(BLX(R12));
1008 emit(STRai(R1, rOPSTACK, 0)); // *opstack = r1 (remainder comes back in r1)
1009 break;
1010
1011 case OP_MULI:
1012 case OP_MULU:
1013 MAYBE_EMIT_CONST();
1014 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1015 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1016 emit(MUL(R0, R1, R0)); // r0 = r1 * r0
1017 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1018 break;
1019
1020 case OP_BAND:
1021 MAYBE_EMIT_CONST();
1022 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1023 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1024 emit(AND(R0, R1, R0)); // r0 = r1 & r0
1025 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1026 break;
1027
1028 case OP_BOR:
1029 MAYBE_EMIT_CONST();
1030 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1031 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1032 emit(ORR(R0, R1, R0)); // r0 = r1 | r0
1033 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1034 break;
1035
1036 case OP_BXOR:
1037 MAYBE_EMIT_CONST();
1038 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1039 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1040 emit(EOR(R0, R1, R0)); // r0 = r1 ^ r0
1041 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1042 break;
1043
1044 case OP_BCOM:
1045 MAYBE_EMIT_CONST();
1046 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1047 emit(MVN(R0, R0)); // r0 = ~r0
1048 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1049 break;
1050
1051 case OP_LSH:
1052 MAYBE_EMIT_CONST();
1053 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1054 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1055 emit(LSL(R0, R1, R0)); // r0 = r1 << r0
1056 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1057 break;
1058
1059 case OP_RSHI:
1060 MAYBE_EMIT_CONST();
1061 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1062 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1063 emit(ASR(R0, R1, R0)); // r0 = r1 >> r0
1064 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1065 break;
1066
1067 case OP_RSHU:
1068 MAYBE_EMIT_CONST();
1069 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1070 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1071 emit(LSR(R0, R1, R0)); // r0 = (unsigned)r1 >> r0
1072 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1073 break;
1074
1075 case OP_NEGF:
1076 MAYBE_EMIT_CONST();
1077 emit(VLDRa(S14, rOPSTACK, 0)); // s14 = *((float*)opstack)
1078 emit(VNEG_F32(S14, S14)); // s14 = -s14
1079 emit(VSTRa(S14, rOPSTACK, 0)); // *((float*)opstack) = s14
1080 break;
1081
1082 case OP_ADDF:
1083 MAYBE_EMIT_CONST();
1084 emit(VLDRa(S14, rOPSTACK, 0)); // s14 = *((float*)opstack)
1085 // vldr can't modify rOPSTACK so
1086 // we'd either need to change it
1087 // with sub or use regular ldr+vmov
1088 emit(LDRxiw(R0, rOPSTACK, 4)); // opstack-=4; r0 = *opstack
1089 emit(VMOVass(S15,R0)); // s15 = r0
1090 emit(VADD_F32(S14, S15, S14)); // s14 = s15 + s14
1091 emit(VSTRa(S14, rOPSTACK, 0)); // *((float*)opstack) = s14
1092 break;
1093
1094 case OP_SUBF:
1095 emit(VLDRa(S14, rOPSTACK, 0)); // s14 = *((float*)opstack)
1096 // see OP_ADDF
1097 emit(LDRxiw(R0, rOPSTACK, 4)); // opstack-=4; r0 = *opstack
1098 emit(VMOVass(S15,R0)); // s15 = r0
1099 emit(VSUB_F32(S14, S15, S14)); // s14 = s15 - s14
1100 emit(VSTRa(S14, rOPSTACK, 0)); // *((float*)opstack) = s14
1101 break;
1102
1103 case OP_DIVF:
1104 emit(VLDRa(S14, rOPSTACK, 0)); // s14 = *((float*)opstack)
1105 // see OP_ADDF
1106 emit(LDRxiw(R0, rOPSTACK, 4)); // opstack-=4; r0 = *opstack
1107 emit(VMOVass(S15,R0)); // s15 = r0
1108 emit(VDIV_F32(S14, S15, S14)); // s14 = s15 / s14
1109 emit(VSTRa(S14, rOPSTACK, 0)); // *((float*)opstack) = s14
1110 break;
1111
1112 case OP_MULF:
1113 emit(VLDRa(S14, rOPSTACK, 0)); // s14 = *((float*)opstack)
1114 // see OP_ADDF
1115 emit(LDRxiw(R0, rOPSTACK, 4)); // opstack-=4; r0 = *opstack
1116 emit(VMOVass(S15,R0)); // s15 = r0
1117 emit(VMUL_F32(S14, S15, S14)); // s14 = s15 * s14
1118 emit(VSTRa(S14, rOPSTACK, 0)); // *((float*)opstack) = s14
1119 break;
1120
1121 case OP_CVIF:
1122 MAYBE_EMIT_CONST();
1123 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1124 emit(VMOVass(S14,R0)); // s14 = r0
1125 emit(VCVT_F32_S32(S14, S14)); // s14 = (float)s14
1126 emit(VSTRa(S14, rOPSTACK, 0)); // *((float*)opstack) = s14
1127 break;
1128
1129 case OP_CVFI:
1130 MAYBE_EMIT_CONST();
1131 emit(VLDRa(S14, rOPSTACK, 0)); // s14 = *((float*)opstack)
1132 emit(VCVT_S32_F32(S14, S14)); // s14 = (int)s14
1133 emit(VMOVssa(R0,S14)); // r0 = s14
1134 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1135 break;
1136 }
1137 }
1138
1139 // never reached
1140 emit(BKPT(0));
1141 } // pass
1142
// Flip the buffer to read+execute and invalidate the instruction cache so
// the freshly written code is actually fetched.
1143 if (mprotect(vm->codeBase, vm->codeLength, PROT_READ|PROT_EXEC/* |PROT_WRITE */)) {
1144 VM_Destroy_Compiled(vm);
1145 DIE("mprotect failed");
1146 }
1147
1148 // clear icache, http://blogs.arm.com/software-enablement/141-caches-and-self-modifying-code/
1149 __clear_cache(vm->codeBase, vm->codeBase+vm->codeLength);
1150
1151 vm->destroy = VM_Destroy_Compiled;
1152 vm->compiled = qtrue;
1153 }
1154
/*
 * VM_CallCompiled
 *
 * Trampoline into the code generated by VM_Compile: set up the VM program
 * stack with the caller's arguments, hand a 16-byte-aligned opstack to the
 * generated entry point, and return the value the bytecode left on top of
 * the opstack.  Sanity-checks the opstack sentinel and programStack on
 * return to catch stack corruption inside the compiled code.
 */
1155 int VM_CallCompiled(vm_t *vm, int *args)
1156 {
1157 byte stack[OPSTACK_SIZE + 15]; // +15 so PADP can 16-byte-align within it
1158 int *opStack;
1159 int programStack = vm->programStack;
1160 int stackOnEntry = programStack;
1161 byte *image = vm->dataBase;
1162 int *argPointer;
1163 int retVal;
1164
1165 currentVM = vm;
1166
1167 vm->currentlyInterpreting = qtrue;
1168
// Reserve a call frame in VM data space: 8 bytes of frame header plus the
// vmMain argument vector, then copy the arguments in.
1169 programStack -= ( 8 + 4 * MAX_VMMAIN_ARGS );
1170 argPointer = (int *)&image[ programStack + 8 ];
1171 memcpy( argPointer, args, 4 * MAX_VMMAIN_ARGS );
// frame header: return-address slot = 0, previous-frame marker = -1
// (NOTE(review): matches the layout the interpreter uses — confirm there)
1172 argPointer[-1] = 0;
1173 argPointer[-2] = -1;
1174
1175
1176 opStack = PADP(stack, 16);
1177 *opStack = 0xDEADBEEF; // sentinel; must survive the compiled call intact
1178
1179 #if 0
1180 Com_Printf("r5 opStack:\t\t%p\n", opStack);
1181 Com_Printf("r7 codeBase:\t\t%p\n", vm->codeBase);
1182 Com_Printf("r8 programStack:\t0x%x\n", programStack);
1183 Com_Printf("r9 dataBase:\t\t%p\n", vm->dataBase);
1184 #endif
1185
1186 /* call generated code */
1187 {
1188 //int (*entry)(void *, int, void *, int);
1189 int (*entry)(vm_t*, int*, int*);
1190
// signature matches the prologue emitted in VM_Compile:
// R0 = vm, R1 = &programStack, R2 = opStack
1191 entry = (void *)(vm->codeBase);
1192 //__asm__ volatile("bkpt");
1193 //retVal = entry(vm->codeBase, programStack, vm->dataBase, vm->dataMask);
1194 retVal = entry(vm, &programStack, opStack);
1195 }
1196
1197 if(*opStack != 0xDEADBEEF)
1198 {
1199 Com_Error(ERR_DROP, "opStack corrupted in compiled code");
1200 }
1201
// the compiled OP_LEAVE chain must have restored programStack exactly
1202 if(programStack != stackOnEntry - (8 + 4 * MAX_VMMAIN_ARGS))
1203 Com_Error(ERR_DROP, "programStack corrupted in compiled code");
1204
1205 vm->programStack = stackOnEntry;
1206 vm->currentlyInterpreting = qfalse;
1207
1208 return retVal;
1209 }
615615 }
616616 }
617617
618 #define MAC_EVENT_PUMP_MSEC 5
619618
620619 /*
621620 ==================
130130
131131 /*
132132 ============
133 R_GetCommandBuffer
133 R_GetCommandBufferReserved
134134
135135 make sure there is enough command space
136136 ============
137137 */
138 void *R_GetCommandBuffer( int bytes ) {
138 void *R_GetCommandBufferReserved( int bytes, int reservedBytes ) {
139139 renderCommandList_t *cmdList;
140140
141141 if ( !tr.registered ) { //DAJ BUGFIX
145145 bytes = PAD(bytes, sizeof(void *));
146146
147147 // always leave room for the end of list command
148 if ( cmdList->used + bytes + 4 > MAX_RENDER_COMMANDS ) {
149 if ( bytes > MAX_RENDER_COMMANDS - 4 ) {
148 if ( cmdList->used + bytes + sizeof( int ) + reservedBytes > MAX_RENDER_COMMANDS ) {
149 if ( bytes > MAX_RENDER_COMMANDS - sizeof( int ) ) {
150150 ri.Error( ERR_FATAL, "R_GetCommandBuffer: bad size %i", bytes );
151151 }
152152 // if we run out of room, just start dropping commands
156156 cmdList->used += bytes;
157157
158158 return cmdList->cmds + cmdList->used - bytes;
159 }
160
161
162 /*
163 =============
164 R_GetCommandBuffer
165
166 returns NULL if there is not enough space for important commands
167 =============
168 */
169 void *R_GetCommandBuffer( int bytes ) {
170 return R_GetCommandBufferReserved( bytes, PAD( sizeof( swapBuffersCommand_t ), sizeof(void *) ) );
159171 }
160172
161173
617629 if ( !tr.registered ) {
618630 return;
619631 }
620 cmd = R_GetCommandBuffer( sizeof( *cmd ) );
632 cmd = R_GetCommandBufferReserved( sizeof( *cmd ), 0 );
621633 if ( !cmd ) {
622634 return;
623635 }
20752075 // we still need to add it for hyperspace cases
20762076 R_AddDrawSurfCmd( drawSurfs, numDrawSurfs );
20772077 return;
2078 }
2079
2080 // if we overflowed MAX_DRAWSURFS, the drawsurfs
2081 // wrapped around in the buffer and we will be missing
2082 // the first surfaces, not the last ones
2083 if ( numDrawSurfs > MAX_DRAWSURFS ) {
2084 numDrawSurfs = MAX_DRAWSURFS;
20852078 }
20862079
20872080 // sort the drawsurfs by sort type, then orientation, then shader
23212314 */
23222315 void R_RenderView( viewParms_t *parms ) {
23232316 int firstDrawSurf;
2317 int numDrawSurfs;
23242318
23252319 if ( parms->viewportWidth <= 0 || parms->viewportHeight <= 0 ) {
23262320 return;
23432337
23442338 R_GenerateDrawSurfs();
23452339
2346 R_SortDrawSurfs( tr.refdef.drawSurfs + firstDrawSurf, tr.refdef.numDrawSurfs - firstDrawSurf );
2340 // if we overflowed MAX_DRAWSURFS, the drawsurfs
2341 // wrapped around in the buffer and we will be missing
2342 // the first surfaces, not the last ones
2343 numDrawSurfs = tr.refdef.numDrawSurfs;
2344 if ( numDrawSurfs > MAX_DRAWSURFS ) {
2345 numDrawSurfs = MAX_DRAWSURFS;
2346 }
2347
2348 R_SortDrawSurfs( tr.refdef.drawSurfs + firstDrawSurf, numDrawSurfs - firstDrawSurf );
23472349
23482350 // draw main system development information (surface outlines, etc)
23492351 R_FogOff();
116116
117117 if ( bundle->numImageAnimations <= 1 ) {
118118 if ( bundle->isLightmap && ( backEnd.refdef.rdflags & RDF_SNOOPERVIEW ) ) {
119 GL_BindToTMU( tr.whiteImage, 0 );
119 GL_BindToTMU( tr.whiteImage, tmu );
120120 } else {
121121 GL_BindToTMU( bundle->image[0], tmu);
122122 }
134134 index %= bundle->numImageAnimations;
135135
136136 if ( bundle->isLightmap && ( backEnd.refdef.rdflags & RDF_SNOOPERVIEW ) ) {
137 GL_BindToTMU( tr.whiteImage, 0 );
137 GL_BindToTMU( tr.whiteImage, tmu );
138138 } else {
139139 GL_BindToTMU( bundle->image[ index ], tmu );
140140 }
13861386 GLSL_SetUniformFloat(sp, UNIFORM_FOGEYET, eyeT);
13871387 }
13881388
1389 GL_State( pStage->stateBits );
1390
13911389 {
13921390 vec4_t baseColor;
13931391 vec4_t vertColor;
1392 int fadeStart, fadeEnd;
13941393
13951394 ComputeShaderColors(pStage, baseColor, vertColor, pStage->stateBits);
1395
1396 //----(SA) fading model stuff
1397 if ( backEnd.currentEntity )
1398 {
1399 fadeStart = backEnd.currentEntity->e.fadeStartTime;
1400 }
1401 else
1402 {
1403 fadeStart = 0;
1404 }
1405
1406 if ( fadeStart )
1407 {
1408 fadeEnd = backEnd.currentEntity->e.fadeEndTime;
1409
1410 if ( fadeStart > tr.refdef.time )
1411 {
1412 // has not started to fade yet
1413 GL_State( pStage->stateBits );
1414 }
1415 else
1416 {
1417 unsigned int tempState;
1418 float alphaval;
1419
1420 if ( fadeEnd < tr.refdef.time )
1421 {
1422 // entity faded out completely
1423 continue;
1424 }
1425
1426 alphaval = (float)( fadeEnd - tr.refdef.time ) / (float)( fadeEnd - fadeStart );
1427
1428 tempState = pStage->stateBits;
1429 // remove the current blend, and don't write to Z buffer
1430 tempState &= ~( GLS_SRCBLEND_BITS | GLS_DSTBLEND_BITS | GLS_DEPTHMASK_TRUE );
1431 // set the blend to src_alpha, dst_one_minus_src_alpha
1432 tempState |= ( GLS_SRCBLEND_SRC_ALPHA | GLS_DSTBLEND_ONE_MINUS_SRC_ALPHA );
1433 GL_State( tempState );
1434 GL_Cull( CT_FRONT_SIDED );
1435 // modulate the alpha component of each vertex in the render list
1436 baseColor[3] *= alphaval;
1437 vertColor[3] *= alphaval;
1438 }
1439 }
1440 else
1441 {
1442 GL_State( pStage->stateBits );
1443 }
1444 //----(SA) end
13961445
13971446 if ((backEnd.refdef.colorScale != 1.0f) && !(backEnd.refdef.rdflags & RDF_NOWORLDMODEL))
13981447 {
552552 }
553553 }
554554
555 #define MAC_EVENT_PUMP_MSEC 5
556555
557556 /*
558557 ==================
123123
124124 /*
125125 ============
126 R_GetCommandBuffer
126 R_GetCommandBufferReserved
127127
128128 make sure there is enough command space
129129 ============
130130 */
131 void *R_GetCommandBuffer( int bytes ) {
131 void *R_GetCommandBufferReserved( int bytes, int reservedBytes ) {
132132 renderCommandList_t *cmdList;
133133
134134 if ( !tr.registered ) { //DAJ BUGFIX
138138 bytes = PAD(bytes, sizeof(void *));
139139
140140 // always leave room for the end of list command
141 if ( cmdList->used + bytes + 4 > MAX_RENDER_COMMANDS ) {
142 if ( bytes > MAX_RENDER_COMMANDS - 4 ) {
141 if ( cmdList->used + bytes + sizeof( int ) + reservedBytes > MAX_RENDER_COMMANDS ) {
142 if ( bytes > MAX_RENDER_COMMANDS - sizeof( int ) ) {
143143 ri.Error( ERR_FATAL, "R_GetCommandBuffer: bad size %i", bytes );
144144 }
145145 // if we run out of room, just start dropping commands
149149 cmdList->used += bytes;
150150
151151 return cmdList->cmds + cmdList->used - bytes;
152 }
153
154
155 /*
156 =============
157 R_GetCommandBuffer
158
159 returns NULL if there is not enough space for important commands
160 =============
161 */
162 void *R_GetCommandBuffer( int bytes ) {
163 return R_GetCommandBufferReserved( bytes, PAD( sizeof( swapBuffersCommand_t ), sizeof(void *) ) );
152164 }
153165
154166
558570 if ( !tr.registered ) {
559571 return;
560572 }
561 cmd = R_GetCommandBuffer( sizeof( *cmd ) );
573 cmd = R_GetCommandBufferReserved( sizeof( *cmd ), 0 );
562574 if ( !cmd ) {
563575 return;
564576 }
14281428 // we still need to add it for hyperspace cases
14291429 R_AddDrawSurfCmd( drawSurfs, numDrawSurfs );
14301430 return;
1431 }
1432
1433 // if we overflowed MAX_DRAWSURFS, the drawsurfs
1434 // wrapped around in the buffer and we will be missing
1435 // the first surfaces, not the last ones
1436 if ( numDrawSurfs > MAX_DRAWSURFS ) {
1437 numDrawSurfs = MAX_DRAWSURFS;
14381431 }
14391432
14401433 // sort the drawsurfs by sort type, then orientation, then shader
16731666 */
16741667 void R_RenderView( viewParms_t *parms ) {
16751668 int firstDrawSurf;
1669 int numDrawSurfs;
16761670
16771671 if ( parms->viewportWidth <= 0 || parms->viewportHeight <= 0 ) {
16781672 return;
16951689
16961690 R_GenerateDrawSurfs();
16971691
1698 R_SortDrawSurfs( tr.refdef.drawSurfs + firstDrawSurf, tr.refdef.numDrawSurfs - firstDrawSurf );
1692 // if we overflowed MAX_DRAWSURFS, the drawsurfs
1693 // wrapped around in the buffer and we will be missing
1694 // the first surfaces, not the last ones
1695 numDrawSurfs = tr.refdef.numDrawSurfs;
1696 if ( numDrawSurfs > MAX_DRAWSURFS ) {
1697 numDrawSurfs = MAX_DRAWSURFS;
1698 }
1699
1700 R_SortDrawSurfs( tr.refdef.drawSurfs + firstDrawSurf, numDrawSurfs - firstDrawSurf );
16991701
17001702 // draw main system development information (surface outlines, etc)
17011703 R_FogOff();
428428 else
429429 perChannelColorBits = 4;
430430
431 #ifndef USE_OPENGLES
432431 #ifdef __sgi /* Fix for SGIs grabbing too many bits of color */
433432 if (perChannelColorBits == 4)
434433 perChannelColorBits = 0; /* Use minimum size for 16-bit color */
435434
436435 /* Need alpha or else SGIs choose 36+ bit RGB mode */
437 SDL_GL_SetAttribute( SDL_GL_ALPHA_SIZE, 1);
436 SDL_GL_SetAttribute( SDL_GL_ALPHA_SIZE, 1 );
437 #endif
438
439 #ifdef USE_OPENGLES
440 SDL_GL_SetAttribute( SDL_GL_CONTEXT_MAJOR_VERSION, 1 );
438441 #endif
439442
440443 SDL_GL_SetAttribute( SDL_GL_RED_SIZE, perChannelColorBits );
449452 if(r_stereoEnabled->integer)
450453 {
451454 glConfig.stereoEnabled = qtrue;
452 SDL_GL_SetAttribute(SDL_GL_STEREO, 1);
455 SDL_GL_SetAttribute( SDL_GL_STEREO, 1 );
453456 }
454457 else
455458 {
456459 glConfig.stereoEnabled = qfalse;
457 SDL_GL_SetAttribute(SDL_GL_STEREO, 0);
460 SDL_GL_SetAttribute( SDL_GL_STEREO, 0 );
458461 }
459462
460463 SDL_GL_SetAttribute( SDL_GL_DOUBLEBUFFER, 1 );
521524 continue;
522525 }
523526 }
524 #else
525 SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 1);
526
527 if( ( SDL_window = SDL_CreateWindow( CLIENT_WINDOW_TITLE, x, y,
528 glConfig.vidWidth, glConfig.vidHeight, flags ) ) == NULL )
529 {
530 ri.Printf( PRINT_DEVELOPER, "SDL_CreateWindow failed: %s\n", SDL_GetError( ) );
531 continue;
532 }
533 #endif // USE_OPENGLES
534527
535528 SDL_SetWindowIcon( SDL_window, icon );
536529
1616 RASPBERRY_PI=1 \
1717 USE_MUMBLE=0 \
1818 BUILD_GAME_SO=1 \
19 BUILD_GAME_QVM=0 \
2019 BUILD_RENDERER_REND2=0 \
2120 ARCH=arm \
2221 PLATFORM=linux \
66 COMPILE_PLATFORM=$(shell uname|sed -e s/_.*//|tr '[:upper:]' '[:lower:]'|sed -e 's/\//_/g')
77
88 COMPILE_ARCH=$(shell uname -m | sed -e s/i.86/i386/ | sed -e 's/^arm.*/arm/')
9
10 ARM_VER_CHECK=$(shell uname -m)
911
1012 ifeq ($(COMPILE_PLATFORM),sunos)
1113 # Solaris uname and GNU uname differ
126128 export CROSS_COMPILING
127129
128130 ifndef VERSION
129 VERSION=1.42c
131 VERSION=1.42d
130132 endif
131133
132134 ifndef CLIENTBIN
186188 endif
187189
188190 ifndef USE_CURL
189 USE_CURL=1
191 USE_CURL=0
190192 endif
191193
192194 ifndef USE_CURL_DLOPEN
214216 endif
215217
216218 ifndef USE_FREETYPE
217 USE_FREETYPE=0
219 USE_FREETYPE=1
218220 endif
219221
220222 ifndef USE_INTERNAL_LIBS
367369 ifeq ($(ARCH),x86_64)
368370 OPTIMIZEVM = -O3
369371 OPTIMIZE = $(OPTIMIZEVM) -ffast-math
370 HAVE_VM_COMPILED = true
371 else
372 endif
372373 ifeq ($(ARCH),x86)
373374 OPTIMIZEVM = -O3 -march=i586
374375 OPTIMIZE = $(OPTIMIZEVM) -ffast-math
375 HAVE_VM_COMPILED=true
376 else
376 endif
377377 ifeq ($(ARCH),ppc)
378378 BASE_CFLAGS += -maltivec
379 HAVE_VM_COMPILED=true
380379 endif
381380 ifeq ($(ARCH),ppc64)
382381 BASE_CFLAGS += -maltivec
383 HAVE_VM_COMPILED=true
384382 endif
385383 ifeq ($(ARCH),sparc)
386384 OPTIMIZE += -mtune=ultrasparc3 -mv8plus
387385 OPTIMIZEVM += -mtune=ultrasparc3 -mv8plus
388 HAVE_VM_COMPILED=true
386 endif
387 ifeq ($(ARCH),sparc64)
388 OPTIMIZE += -mtune=ultrasparc3 -mv8plus
389 OPTIMIZEVM += -mtune=ultrasparc3 -mv8plus
389390 endif
390391 ifeq ($(ARCH),alpha)
391392 # According to http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=410555
392393 # -ffast-math will cause the client to die with SIGFPE on Alpha
393394 OPTIMIZE = $(OPTIMIZEVM)
394 endif
395 endif
396395 endif
397396
398397 SHLIBEXT=so
455454 #############################################################################
456455
457456 ifeq ($(PLATFORM),darwin)
458 HAVE_VM_COMPILED=true
459457 LIBS = -framework Cocoa
460458 CLIENT_LIBS=
461459 RENDERER_LIBS=
635633 ifeq ($(ARCH),x86_64)
636634 OPTIMIZEVM = -O3
637635 OPTIMIZE = $(OPTIMIZEVM) -ffast-math
638 HAVE_VM_COMPILED = true
639636 FILE_ARCH=x64
640637 endif
641638 ifeq ($(ARCH),x86)
642639 OPTIMIZEVM = -O3 -march=i586
643640 OPTIMIZE = $(OPTIMIZEVM) -ffast-math
644 HAVE_VM_COMPILED = true
645641 endif
646642
647643 SHLIBEXT=dll
737733 -Wall -fno-strict-aliasing \
738734 -DUSE_ICON -DMAP_ANONYMOUS=MAP_ANON
739735 CLIENT_CFLAGS += $(SDL_CFLAGS)
740 HAVE_VM_COMPILED = true
741736
742737 OPTIMIZEVM = -O3
743738 OPTIMIZE = $(OPTIMIZEVM) -ffast-math
798793 ifeq ($(ARCH),x86_64)
799794 OPTIMIZEVM = -O3
800795 OPTIMIZE = $(OPTIMIZEVM) -ffast-math
801 HAVE_VM_COMPILED = true
802 else
796 endif
803797 ifeq ($(ARCH),x86)
804798 OPTIMIZEVM = -O3 -march=i586
805799 OPTIMIZE = $(OPTIMIZEVM) -ffast-math
806 HAVE_VM_COMPILED=true
807 else
800 endif
808801 ifeq ($(ARCH),ppc)
809802 BASE_CFLAGS += -maltivec
810 HAVE_VM_COMPILED=true
811803 endif
812804 ifeq ($(ARCH),ppc64)
813805 BASE_CFLAGS += -maltivec
814 HAVE_VM_COMPILED=true
806 endif
807 ifeq ($(ARCH),sparc)
808 OPTIMIZE += -mtune=ultrasparc3 -mv8plus
809 OPTIMIZEVM += -mtune=ultrasparc3 -mv8plus
815810 endif
816811 ifeq ($(ARCH),sparc64)
817812 OPTIMIZE += -mtune=ultrasparc3 -mv8plus
818813 OPTIMIZEVM += -mtune=ultrasparc3 -mv8plus
819 HAVE_VM_COMPILED=true
820814 endif
821815 ifeq ($(ARCH),alpha)
822816 # According to http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=410555
823817 # -ffast-math will cause the client to die with SIGFPE on Alpha
824818 OPTIMIZE = $(OPTIMIZEVM)
825 endif
826 endif
827819 endif
828820
829821 ifeq ($(USE_CURL),1)
873865
874866 BASE_CFLAGS = -Wall -fno-strict-aliasing
875867
876 ifeq ($(ARCH),x86)
877 HAVE_VM_COMPILED=true
878 endif
879
880868 BUILD_CLIENT = 0
881869 else # ifeq netbsd
882870
934922
935923 ifeq ($(ARCH),sparc)
936924 OPTIMIZEVM += -O3 -mtune=ultrasparc3 -mv8plus -mno-faster-structs
937 HAVE_VM_COMPILED=true
938 else
925 endif
939926 ifeq ($(ARCH),x86)
940927 OPTIMIZEVM += -march=i586
941 HAVE_VM_COMPILED=true
942928 BASE_CFLAGS += -m32
943929 CLIENT_CFLAGS += -I/usr/X11/include/NVIDIA
944930 CLIENT_LDFLAGS += -L/usr/X11/lib/NVIDIA -R/usr/X11/lib/NVIDIA
945 endif
946931 endif
947932
948933 OPTIMIZE = $(OPTIMIZEVM) -ffast-math
986971
987972 ifndef RANLIB
988973 RANLIB=ranlib
974 endif
975
976 ifndef HAVE_VM_COMPILED
977 HAVE_VM_COMPILED=false
978 endif
979
980 ifneq ($(findstring $(ARCH),x86 x86_64 ppc ppc64 sparc sparc64),)
981 HAVE_VM_COMPILED=true
982 endif
983
984 ifeq ($(ARM_VER_CHECK),armv7l)
985 HAVE_VM_COMPILED=true
989986 endif
990987
991988 ifneq ($(HAVE_VM_COMPILED),true)
13171314
13181315 NAKED_TARGETS=$(shell echo $(TARGETS) | sed -e "s!$(B)/!!g")
13191316
1320 print_list=@for i in $(1); \
1317 print_list=-@for i in $(1); \
13211318 do \
13221319 echo " $$i"; \
13231320 done
13391336 @echo " VERSION: $(VERSION)"
13401337 @echo " COMPILE_PLATFORM: $(COMPILE_PLATFORM)"
13411338 @echo " COMPILE_ARCH: $(COMPILE_ARCH)"
1339 @echo " HAVE_VM_COMPILED: $(HAVE_VM_COMPILED)"
13421340 @echo " CC: $(CC)"
13431341 @echo " CXX: $(CXX)"
13441342 ifdef MINGW
21232121
21242122 ifeq ($(HAVE_VM_COMPILED),true)
21252123 ifneq ($(findstring $(ARCH),x86 x86_64),)
2126 Q3OBJ += \
2127 $(B)/client/vm_x86.o
2124 Q3OBJ += $(B)/client/vm_x86.o
21282125 endif
21292126 ifneq ($(findstring $(ARCH),ppc ppc64),)
21302127 Q3OBJ += $(B)/client/vm_powerpc.o $(B)/client/vm_powerpc_asm.o
21312128 endif
2132 ifeq ($(ARCH),sparc)
2129 ifneq ($(findstring $(ARCH),sparc sparc64),)
21332130 Q3OBJ += $(B)/client/vm_sparc.o
2131 endif
2132 ifeq ($(ARM_VER_CHECK),armv7l)
2133 Q3OBJ += $(B)/client/vm_armv7l.o
21342134 endif
21352135 endif
21362136
22922292
22932293 ifeq ($(HAVE_VM_COMPILED),true)
22942294 ifneq ($(findstring $(ARCH),x86 x86_64),)
2295 Q3DOBJ += \
2296 $(B)/ded/vm_x86.o
2295 Q3DOBJ += $(B)/ded/vm_x86.o
22972296 endif
22982297 ifneq ($(findstring $(ARCH),ppc ppc64),)
22992298 Q3DOBJ += $(B)/ded/vm_powerpc.o $(B)/ded/vm_powerpc_asm.o
23002299 endif
2301 ifeq ($(ARCH),sparc)
2300 ifneq ($(findstring $(ARCH),sparc sparc64),)
23022301 Q3DOBJ += $(B)/ded/vm_sparc.o
2302 endif
2303 ifeq ($(ARM_VER_CHECK),armv7l)
2304 Q3DOBJ += $(B)/ded/vm_armv7l.o
23032305 endif
23042306 endif
23052307
18391839 char message[MAX_RCON_MESSAGE];
18401840 netadr_t to;
18411841
1842 if ( !rcon_client_password->string ) {
1842 if ( !rcon_client_password->string[0] ) {
18431843 Com_Printf( "You must set 'rcon_password' before\n"
18441844 "issuing an rcon command.\n" );
18451845 return;
881881 }
882882 }
883883
884 /*
884885 // set up current episode (for notebook de-briefing tabs)
885886 trap_Cvar_Register( &cvar, "g_episode", "0", CVAR_ROM );
886887 trap_Cvar_Set( "g_episode", va( "%d", ent->missionLevel ) );
887
888 */
888889 }
889890
890891 }
13791380 cast_state_t *cs;
13801381 qtime_t tm;
13811382 qboolean serverEntityUpdate = qfalse;
1383 vmCvar_t episode;
13821384
13831385 if ( g_gametype.integer != GT_SINGLE_PLAYER ) { // don't allow loads in MP
13841386 return;
14331435 // read the 'episode'
14341436 if ( ver >= 13 ) {
14351437 trap_FS_Read( &i, sizeof( i ), f );
1438 trap_Cvar_Register( &episode, "g_episode", "0", CVAR_ROM );
14361439 trap_Cvar_Set( "g_episode", va( "%i", i ) );
14371440 }
14381441 //----(SA) end
19001900 void Com_ExecuteCfg(void)
19011901 {
19021902 Cbuf_ExecuteText(EXEC_NOW, "exec default.cfg\n");
1903 if ( FS_ReadFile( "language.cfg", NULL ) > 0 ) {
1904 Cbuf_ExecuteText(EXEC_APPEND, "exec language.cfg\n");
1905 } else if ( FS_ReadFile( "Language.cfg", NULL ) > 0 ) {
1906 Cbuf_ExecuteText(EXEC_APPEND, "exec Language.cfg\n");
1907 }
19031908 Cbuf_Execute(); // Always execute after exec to prevent text buffer overflowing
19041909
19051910 if(!Com_SafeMode())
182182 1886207346u
183183 };
184184
185 static const unsigned int sppak_checksums[] = {
185 static const unsigned int en_sppak_checksums[] = {
186186 2837138611u,
187187 3033901371u,
188188 483593179u,
189189 // sp_pak4.pk3 from GOTY edition
190 // 4131017020u
190 4131017020u
191191 };
192192
193193 static const unsigned int fr_sppak_checksums[] = {
194194 2183777857u,
195195 3033901371u,
196 483593179u,
196 839012592u,
197197 // sp_pak4.pk3 from GOTY edition
198 // 4131017020u
198 4131017020u
199 };
200
201 static const unsigned int it_sppak_checksums[] = {
202 3826630960u,
203 3033901371u,
204 652965486u,
205 // sp_pak4.pk3 from GOTY edition
206 4131017020u
207 };
208
209 static const unsigned int sp_sppak_checksums[] = {
210 652879493u,
211 3033901371u,
212 1162920123u,
213 // sp_pak4.pk3 from GOTY edition
214 4131017020u
199215 };
200216
201217 // if this is defined, the executable positively won't work with any paks other
37673783 && strlen(pakBasename) == 7 && !Q_stricmpn( pakBasename, "sp_pak", 6 )
37683784 && pakBasename[6] >= '1' && pakBasename[6] <= '1' + NUM_SP_PAKS - 1)
37693785 {
3770 if( curpack->checksum != sppak_checksums[pakBasename[6]-'1'] && curpack->checksum != fr_sppak_checksums[pakBasename[6]-'1'] )
3786 if( curpack->checksum != en_sppak_checksums[pakBasename[6]-'1'] &&
3787 curpack->checksum != fr_sppak_checksums[pakBasename[6]-'1'] &&
3788 curpack->checksum != it_sppak_checksums[pakBasename[6]-'1'] &&
3789 curpack->checksum != sp_sppak_checksums[pakBasename[6]-'1'] )
37713790 {
37723791 if(pakBasename[6] == '1')
37733792 {
37993818 // Finally check whether this pak's checksum is listed because the user tried
38003819 // to trick us by renaming the file, and set foundPak's highest bit to indicate this case.
38013820
3802 for(index = 0; index < ARRAY_LEN(sppak_checksums); index++)
3821 for(index = 0; index < ARRAY_LEN( en_sppak_checksums ); index++)
38033822 {
3804 if(curpack->checksum == sppak_checksums[index])
3823 if( curpack->checksum == en_sppak_checksums[index] ||
3824 curpack->checksum == fr_sppak_checksums[index] ||
3825 curpack->checksum == it_sppak_checksums[index] ||
3826 curpack->checksum == sp_sppak_checksums[index] )
38053827 {
38063828 Com_Printf("\n\n"
38073829 "**************************************************\n"
7575 #define LEGACY_HEARTBEAT_FOR_MASTER "Wolfenstein-1"
7676
7777 #ifndef PRODUCT_VERSION
78 #define PRODUCT_VERSION "1.42c"
78 #define PRODUCT_VERSION "1.42d"
7979 #endif
8080
8181 #define Q3_VERSION PRODUCT_NAME " " PRODUCT_VERSION
630630 // #define FS_QAGAME_REF 0x08
631631 // number of id paks that will never be autodownloaded from baseq3
632632 #define NUM_ID_PAKS 10
633 #define NUM_SP_PAKS 3
633 #define NUM_SP_PAKS 4
634634
635635 #define MAX_FILE_HANDLES 64
636636
0 /*
1 ===========================================================================
2 Copyright (C) 2009 David S. Miller <davem@davemloft.net>
3 Copyright (C) 2013,2014 SUSE Linux Products GmbH
4
5 This file is part of Quake III Arena source code.
6
7 Quake III Arena source code is free software; you can redistribute it
8 and/or modify it under the terms of the GNU General Public License as
9 published by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 Quake III Arena source code is distributed in the hope that it will be
13 useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with Quake III Arena source code; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 ===========================================================================
21
22 ARMv7l VM by Ludwig Nussel <ludwig.nussel@suse.de>
23
24 TODO: optimization
25
26 Docu:
27 http://www.coranac.com/tonc/text/asm.htm
28 http://www.heyrick.co.uk/armwiki/Category:Opcodes
29 ARMv7-A_ARMv7-R_DDI0406_2007.pdf
30 */
31
32 #include <sys/types.h>
33 #include <sys/mman.h>
34 #include <sys/time.h>
35 #include <time.h>
36 #include <stddef.h>
37
38 #include <sys/types.h>
39 #include <sys/stat.h>
40 #include <fcntl.h>
41
42 #include "vm_local.h"
43 #define R0 0
44 #define R1 1
45 #define R2 2
46 #define R3 3
47 #define R4 4
48
49 #define R12 12
50
51 #define FP 11
52 #define SP 13
53 #define LR 14
54 #define PC 15
55
56 #define APSR_nzcv 15
57
58 #define S14 14
59 #define S15 15
60
61 #define rOPSTACK 5
62 #define rOPSTACKBASE 6
63 #define rCODEBASE 7
64 #define rPSTACK 8
65 #define rDATABASE 9
66 #define rDATAMASK 10
67
68 #define bit(x) (1<<x)
69
70 /* arm eabi, builtin gcc functions */
71 int __aeabi_idiv (int, int);
72 unsigned __aeabi_uidiv (unsigned, unsigned);
73 void __aeabi_idivmod(void);
74 void __aeabi_uidivmod(void);
75
76 /* exit() won't be called but use it because it is marked with noreturn */
77 #define DIE( reason, args... ) \
78 do { \
79 Com_Error(ERR_DROP, "vm_arm compiler error: " reason, ##args); \
80 exit(1); \
81 } while(0)
82
83 /*
84 * opcode information table:
85 * - length of immediate value
86 * - returned register type
87 * - required register(s) type
88 */
89 #define opImm0 0x0000 /* no immediate */
90 #define opImm1 0x0001 /* 1 byte immadiate value after opcode */
91 #define opImm4 0x0002 /* 4 bytes immediate value after opcode */
92
93 #define opRet0 0x0000 /* returns nothing */
94 #define opRetI 0x0004 /* returns integer */
95 #define opRetF 0x0008 /* returns float */
96 #define opRetIF (opRetI | opRetF) /* returns integer or float */
97
98 #define opArg0 0x0000 /* requires nothing */
99 #define opArgI 0x0010 /* requires integer(s) */
100 #define opArgF 0x0020 /* requires float(s) */
101 #define opArgIF (opArgI | opArgF) /* requires integer or float */
102
103 #define opArg2I 0x0040 /* requires second argument, integer */
104 #define opArg2F 0x0080 /* requires second argument, float */
105 #define opArg2IF (opArg2I | opArg2F) /* requires second argument, integer or float */
106
107 static const unsigned char vm_opInfo[256] =
108 {
109 [OP_UNDEF] = opImm0,
110 [OP_IGNORE] = opImm0,
111 [OP_BREAK] = opImm0,
112 [OP_ENTER] = opImm4,
113 /* OP_LEAVE has to accept floats, they will be converted to ints */
114 [OP_LEAVE] = opImm4 | opRet0 | opArgIF,
115 /* only STORE4 and POP use values from OP_CALL,
116 * no need to convert floats back */
117 [OP_CALL] = opImm0 | opRetI | opArgI,
118 [OP_PUSH] = opImm0 | opRetIF,
119 [OP_POP] = opImm0 | opRet0 | opArgIF,
120 [OP_CONST] = opImm4 | opRetIF,
121 [OP_LOCAL] = opImm4 | opRetI,
122 [OP_JUMP] = opImm0 | opRet0 | opArgI,
123
124 [OP_EQ] = opImm4 | opRet0 | opArgI | opArg2I,
125 [OP_NE] = opImm4 | opRet0 | opArgI | opArg2I,
126 [OP_LTI] = opImm4 | opRet0 | opArgI | opArg2I,
127 [OP_LEI] = opImm4 | opRet0 | opArgI | opArg2I,
128 [OP_GTI] = opImm4 | opRet0 | opArgI | opArg2I,
129 [OP_GEI] = opImm4 | opRet0 | opArgI | opArg2I,
130 [OP_LTU] = opImm4 | opRet0 | opArgI | opArg2I,
131 [OP_LEU] = opImm4 | opRet0 | opArgI | opArg2I,
132 [OP_GTU] = opImm4 | opRet0 | opArgI | opArg2I,
133 [OP_GEU] = opImm4 | opRet0 | opArgI | opArg2I,
134 [OP_EQF] = opImm4 | opRet0 | opArgF | opArg2F,
135 [OP_NEF] = opImm4 | opRet0 | opArgF | opArg2F,
136 [OP_LTF] = opImm4 | opRet0 | opArgF | opArg2F,
137 [OP_LEF] = opImm4 | opRet0 | opArgF | opArg2F,
138 [OP_GTF] = opImm4 | opRet0 | opArgF | opArg2F,
139 [OP_GEF] = opImm4 | opRet0 | opArgF | opArg2F,
140
141 [OP_LOAD1] = opImm0 | opRetI | opArgI,
142 [OP_LOAD2] = opImm0 | opRetI | opArgI,
143 [OP_LOAD4] = opImm0 | opRetIF| opArgI,
144 [OP_STORE1] = opImm0 | opRet0 | opArgI | opArg2I,
145 [OP_STORE2] = opImm0 | opRet0 | opArgI | opArg2I,
146 [OP_STORE4] = opImm0 | opRet0 | opArgIF| opArg2I,
147 [OP_ARG] = opImm1 | opRet0 | opArgIF,
148 [OP_BLOCK_COPY] = opImm4 | opRet0 | opArgI | opArg2I,
149
150 [OP_SEX8] = opImm0 | opRetI | opArgI,
151 [OP_SEX16] = opImm0 | opRetI | opArgI,
152 [OP_NEGI] = opImm0 | opRetI | opArgI,
153 [OP_ADD] = opImm0 | opRetI | opArgI | opArg2I,
154 [OP_SUB] = opImm0 | opRetI | opArgI | opArg2I,
155 [OP_DIVI] = opImm0 | opRetI | opArgI | opArg2I,
156 [OP_DIVU] = opImm0 | opRetI | opArgI | opArg2I,
157 [OP_MODI] = opImm0 | opRetI | opArgI | opArg2I,
158 [OP_MODU] = opImm0 | opRetI | opArgI | opArg2I,
159 [OP_MULI] = opImm0 | opRetI | opArgI | opArg2I,
160 [OP_MULU] = opImm0 | opRetI | opArgI | opArg2I,
161 [OP_BAND] = opImm0 | opRetI | opArgI | opArg2I,
162 [OP_BOR] = opImm0 | opRetI | opArgI | opArg2I,
163 [OP_BXOR] = opImm0 | opRetI | opArgI | opArg2I,
164 [OP_BCOM] = opImm0 | opRetI | opArgI,
165 [OP_LSH] = opImm0 | opRetI | opArgI | opArg2I,
166 [OP_RSHI] = opImm0 | opRetI | opArgI | opArg2I,
167 [OP_RSHU] = opImm0 | opRetI | opArgI | opArg2I,
168 [OP_NEGF] = opImm0 | opRetF | opArgF,
169 [OP_ADDF] = opImm0 | opRetF | opArgF | opArg2F,
170 [OP_SUBF] = opImm0 | opRetF | opArgF | opArg2F,
171 [OP_DIVF] = opImm0 | opRetF | opArgF | opArg2F,
172 [OP_MULF] = opImm0 | opRetF | opArgF | opArg2F,
173 [OP_CVIF] = opImm0 | opRetF | opArgI,
174 [OP_CVFI] = opImm0 | opRetI | opArgF,
175 };
176
177 #ifdef DEBUG_VM
178 static const char *opnames[256] = {
179 "OP_UNDEF", "OP_IGNORE", "OP_BREAK", "OP_ENTER", "OP_LEAVE", "OP_CALL",
180 "OP_PUSH", "OP_POP", "OP_CONST", "OP_LOCAL", "OP_JUMP",
181 "OP_EQ", "OP_NE", "OP_LTI", "OP_LEI", "OP_GTI", "OP_GEI",
182 "OP_LTU", "OP_LEU", "OP_GTU", "OP_GEU", "OP_EQF", "OP_NEF",
183 "OP_LTF", "OP_LEF", "OP_GTF", "OP_GEF",
184 "OP_LOAD1", "OP_LOAD2", "OP_LOAD4", "OP_STORE1", "OP_STORE2",
185 "OP_STORE4", "OP_ARG", "OP_BLOCK_COPY",
186 "OP_SEX8", "OP_SEX16",
187 "OP_NEGI", "OP_ADD", "OP_SUB", "OP_DIVI", "OP_DIVU",
188 "OP_MODI", "OP_MODU", "OP_MULI", "OP_MULU", "OP_BAND",
189 "OP_BOR", "OP_BXOR", "OP_BCOM", "OP_LSH", "OP_RSHI", "OP_RSHU",
190 "OP_NEGF", "OP_ADDF", "OP_SUBF", "OP_DIVF", "OP_MULF",
191 "OP_CVIF", "OP_CVFI",
192 };
193
194 #define NOTIMPL(x) \
195 do { Com_Error(ERR_DROP, "instruction not implemented: %s", opnames[x]); } while(0)
196 #else
197 #define NOTIMPL(x) \
198 do { Com_Printf(S_COLOR_RED "instruction not implemented: %x\n", x); vm->compiled = qfalse; return; } while(0)
199 #endif
200
201 static void VM_Destroy_Compiled(vm_t *vm)
202 {
203 if (vm->codeBase) {
204 if (munmap(vm->codeBase, vm->codeLength))
205 Com_Printf(S_COLOR_RED "Memory unmap failed, possible memory leak\n");
206 }
207 vm->codeBase = NULL;
208 }
209
210 /*
211 =================
212 ErrJump
213 Error handler for jump/call to invalid instruction number
214 =================
215 */
216
217 static void __attribute__((__noreturn__)) ErrJump(unsigned num)
218 {
219 Com_Error(ERR_DROP, "program tried to execute code outside VM (%x)", num);
220 }
221
222 static int asmcall(int call, int pstack)
223 {
224 // save currentVM so as to allow for recursive VM entry
225 vm_t *savedVM = currentVM;
226 int i, ret;
227
228 // modify VM stack pointer for recursive VM entry
229 currentVM->programStack = pstack - 4;
230
231 if (sizeof(intptr_t) == sizeof(int)) {
232 intptr_t *argPosition = (intptr_t *)((byte *)currentVM->dataBase + pstack + 4);
233 argPosition[0] = -1 - call;
234 ret = currentVM->systemCall(argPosition);
235 } else {
236 intptr_t args[MAX_VMSYSCALL_ARGS];
237
238 args[0] = -1 - call;
239 int *argPosition = (int *)((byte *)currentVM->dataBase + pstack + 4);
240 for( i = 1; i < ARRAY_LEN(args); i++ )
241 args[i] = argPosition[i];
242
243 ret = currentVM->systemCall(args);
244 }
245
246 currentVM = savedVM;
247
248 return ret;
249 }
250
251 void _emit(vm_t *vm, unsigned isn, int pass)
252 {
253 #if 0
254 static int fd = -2;
255 if (fd == -2)
256 fd = open("code.bin", O_TRUNC|O_WRONLY|O_CREAT, 0644);
257 if (fd > 0)
258 write(fd, &isn, 4);
259 #endif
260
261 if (pass)
262 memcpy(vm->codeBase+vm->codeLength, &isn, 4);
263 vm->codeLength+=4;
264 }
265
266 #define emit(isn) _emit(vm, isn, pass)
267
268 static unsigned char off8(unsigned val)
269 {
270 if (val&3)
271 DIE("offset must be multiple of four");
272 if (val > 1020)
273 DIE("offset too large");
274 return val>>2;
275 }
276
277 // ARM is really crazy ...
278 static unsigned short rimm(unsigned val)
279 {
280 unsigned shift = 0;
281 if (val < 256)
282 return val;
283 // rotate the value until it fits
284 while (shift < 16 && (val>255 || !(val&3))) {
285 val = (val&3)<<30 | val>>2;
286 ++shift;
287 }
288 if (shift > 15 || val > 255) {
289 DIE("immediate cannot be encoded (%d, %d)\n", shift, val);
290 }
291 return (16-shift)<<8 | val;
292 }
293
294 // same as rimm but doesn't die, returns 0 if not encodable so don't call with zero as argument!
295 static unsigned short can_encode(unsigned val)
296 {
297 unsigned shift = 0;
298 if (!val)
299 DIE("can_encode: invalid argument");
300 if (val < 256)
301 return val;
302 // rotate the value until it fits
303 while (shift < 16 && (val>255 || !(val&3))) {
304 val = (val&3)<<30 | val>>2;
305 ++shift;
306 }
307 if (shift > 15 || val > 255) {
308 return 0;
309 }
310 return (16-shift)<<8 | val;
311 }
312
313 #define PREINDEX (1<<24)
314
315 #define rASR(i, reg) (0b10<<5 | ((i&31)<<7) | reg)
316 #define rLSL(i, reg) (0b00<<5 | ((i&31)<<7) | reg)
317 #define rLSR(i, reg) (0b01<<5 | ((i&31)<<7) | reg)
318 #define rROR(i, reg) (0b11<<5 | ((i&31)<<7) | reg)
319
320 // conditions
321 #define EQ (0b0000<<28)
322 #define NE (0b0001<<28)
323 #define CS (0b0010<<28)
324 #define HS CS
325 #define CC (0b0011<<28)
326 #define LO CC
327 #define MI (0b0100<<28)
328 #define PL (0b0101<<28)
329 #define VS (0b0110<<28)
330 #define VC (0b0111<<28)
331 #define HI (0b1000<<28)
332 #define LS (0b1001<<28)
333 #define GE (0b1010<<28)
334 #define LT (0b1011<<28)
335 #define GT (0b1100<<28)
336 #define LE (0b1101<<28)
337 #define AL (0b1110<<28)
338 #define cond(what, op) (what | (op&~AL))
339
340 // XXX: v not correctly computed
341 #define BKPT(v) (AL | 0b10010<<20 | ((v&~0xF)<<4) | 0b0111<<4 | (v&0xF))
342
343 #define YIELD (0b110010<<20 | 0b1111<<12 | 1)
344 #define NOP cond(AL, YIELD)
345
346 // immediate value must fit in 0xFF!
347 #define ANDi(dst, src, i) (AL | (0b001<<25) | (0b00000<<20) | (src<<16) | (dst<<12) | rimm(i))
348 #define EORi(dst, src, i) (AL | (0b001<<25) | (0b00010<<20) | (src<<16) | (dst<<12) | rimm(i))
349 #define SUBi(dst, src, i) (AL | (0b001<<25) | (0b00100<<20) | (src<<16) | (dst<<12) | rimm(i))
350 #define RSBi(dst, src, i) (AL | (0b001<<25) | (0b00110<<20) | (src<<16) | (dst<<12) | rimm(i))
351 #define ADDi(dst, src, i) (AL | (0b001<<25) | (0b01000<<20) | (src<<16) | (dst<<12) | rimm(i))
352 #define ADCi(dst, src, i) (AL | (0b001<<25) | (0b01010<<20) | (src<<16) | (dst<<12) | rimm(i))
353 #define SBCi(dst, src, i) (AL | (0b001<<25) | (0b01100<<20) | (src<<16) | (dst<<12) | rimm(i))
354 #define RSCi(dst, src, i) (AL | (0b001<<25) | (0b01110<<20) | (src<<16) | (dst<<12) | rimm(i))
355
356 #define ORRi(dst, src, i) (AL | (0b001<<25) | (0b11000<<20) | (src<<16) | (dst<<12) | rimm(i))
357 #define MOVi(dst, i) (AL | (0b001<<25) | (0b11010<<20) | (dst<<12) | rimm(i))
358 #define BICi(dst, src, i) (AL | (0b001<<25) | (0b11100<<20) | (src<<16) | (dst<<12) | rimm(i))
359 #define MVNi(dst, i) (AL | (0b001<<25) | (0b11110<<20) | (dst<<12) | rimm(i))
360
361 #define MOVW(dst, i) (AL | (0b11<<24) | ((((i)>>12)&0xF)<<16) | (dst<<12) | ((i)&((1<<12)-1)))
362 #define MOVT(dst, i) (AL | (0b11<<24) | (0b0100<<20) | ((((i)>>12)&0xF)<<16) | (dst<<12) | ((i)&((1<<12)-1)))
363
364 #define TSTi( src, i) (AL | (0b001<<25) | (0b10001<<20) | (src<<16) | rimm(i))
365 #define TEQi( src, i) (AL | (0b001<<25) | (0b10011<<20) | (src<<16) | rimm(i))
366 #define CMPi( src, i) (AL | (0b001<<25) | (0b10101<<20) | (src<<16) | rimm(i))
367 #define CMNi( src, i) (AL | (0b001<<25) | (0b10111<<20) | (src<<16) | rimm(i))
368
369 #define ANDSi(dst, src, i) (ANDi(dst, src, i) | (1<<20))
370 #define EORSi(dst, src, i) (EORi(dst, src, i) | (1<<20))
371 #define SUBSi(dst, src, i) (SUBi(dst, src, i) | (1<<20))
372 #define RSBSi(dst, src, i) (RSBi(dst, src, i) | (1<<20))
373 #define ADDSi(dst, src, i) (ADDi(dst, src, i) | (1<<20))
374 #define ADCSi(dst, src, i) (ADCi(dst, src, i) | (1<<20))
375 #define SBCSi(dst, src, i) (SBCi(dst, src, i) | (1<<20))
376 #define RSCSi(dst, src, i) (RSCi(dst, src, i) | (1<<20))
377
378 #define ORRSi(dst, src, i) (ORRi(dst, src, i) | (1<<20))
379 #define MOVSi(dst, i) (MOVi(dst, i) | (1<<20))
380 #define BICSi(dst, src, i) (BICi(dst, src, i) | (1<<20))
381 #define MVNSi(dst, i) (MVNi(dst, src, i) | (1<<20))
382
383 #define AND(dst, src, reg) (AL | (0b000<<25) | (0b00000<<20) | (src<<16) | (dst<<12) | reg)
384 #define EOR(dst, src, reg) (AL | (0b000<<25) | (0b00010<<20) | (src<<16) | (dst<<12) | reg)
385 #define SUB(dst, src, reg) (AL | (0b000<<25) | (0b00100<<20) | (src<<16) | (dst<<12) | reg)
386 #define RSB(dst, src, reg) (AL | (0b000<<25) | (0b00110<<20) | (src<<16) | (dst<<12) | reg)
387 #define ADD(dst, src, reg) (AL | (0b000<<25) | (0b01000<<20) | (src<<16) | (dst<<12) | reg)
388 #define ADC(dst, src, reg) (AL | (0b000<<25) | (0b01010<<20) | (src<<16) | (dst<<12) | reg)
389 #define SBC(dst, src, reg) (AL | (0b000<<25) | (0b01100<<20) | (src<<16) | (dst<<12) | reg)
390 #define RSC(dst, src, reg) (AL | (0b000<<25) | (0b01110<<20) | (src<<16) | (dst<<12) | reg)
391
392 #define ORR(dst, src, reg) (AL | (0b000<<25) | (0b11000<<20) | (src<<16) | (dst<<12) | reg)
393 #define MOV(dst, src) (AL | (0b000<<25) | (0b11010<<20) | (dst<<12) | src)
394
395 #define LSL(dst, src, reg) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | (reg<<8) | (0b0001<<4) | src)
396 #define LSR(dst, src, reg) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | (reg<<8) | (0b0011<<4) | src)
397 #define ASR(dst, src, reg) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | (reg<<8) | (0b0101<<4) | src)
398 #define ROR(dst, src, reg) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | (reg<<8) | (0b0111<<4) | src)
399
400 #define LSLi(dst, src, i) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | ((i&0x1F)<<7) | (0b000<<4) | src)
401 #define LSRi(dst, src, i) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | ((i&0x1F)<<7) | (0b010<<4) | src)
402 #define ASRi(dst, src, i) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | ((i&0x1F)<<7) | (0b100<<4) | src)
403 #define RORi(dst, src, i) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | ((i&0x1F)<<7) | (0b110<<4) | src)
404 #define RRX(dst, src) (AL | (0b000<<25) | (0b1101<<21) | (0<<20) | (dst<<12) | (0b110<<4) | src)
405
406 #define BIC(dst, src, reg) (AL | (0b000<<25) | (0b11100<<20) | (src<<16) | (dst<<12) | reg)
407 #define MVN(dst, reg) (AL | (0b000<<25) | (0b11110<<20) | (dst<<12) | reg)
408
409 #define TST( src, reg) (AL | (0b000<<25) | (0b10001<<20) | (src<<16) | reg)
410 #define TEQ( src, reg) (AL | (0b000<<25) | (0b10011<<20) | (src<<16) | reg)
411 #define CMP( src, reg) (AL | (0b000<<25) | (0b10101<<20) | (src<<16) | reg)
412 #define CMN( src, reg) (AL | (0b000<<25) | (0b10111<<20) | (src<<16) | reg)
413
414 #define LDRa(dst, base, off) (AL | (0b011<<25) | (0b1100<<21) | (1<<20) | base<<16 | dst<<12 | off)
415 #define LDRx(dst, base, off) (AL | (0b011<<25) | (0b1000<<21) | (1<<20) | base<<16 | dst<<12 | off)
416
417 #define LDRai(dst, base, off) (AL | (0b010<<25) | (0b1100<<21) | (1<<20) | base<<16 | dst<<12 | rimm(off))
418 #define LDRxi(dst, base, off) (AL | (0b010<<25) | (0b1000<<21) | (1<<20) | base<<16 | dst<<12 | rimm(off))
419 #define LDRxiw(dst, base, off) (AL | (0b010<<25) | (0b1001<<21) | (1<<20) | base<<16 | dst<<12 | rimm(off))
420
421 #define LDRTa(dst, base, off) (AL | (0b011<<25) | (0b0101<<21) | (1<<20) | base<<16 | dst<<12 | off)
422 #define LDRTx(dst, base, off) (AL | (0b011<<25) | (0b0001<<21) | (1<<20) | base<<16 | dst<<12 | off)
423 #define LDRTai(dst, base, off) (AL | (0b010<<25) | (0b0101<<21) | (1<<20) | base<<16 | dst<<12 | rimm(off))
424 #define LDRTxi(dst, base, off) (AL | (0b010<<25) | (0b0001<<21) | (1<<20) | base<<16 | dst<<12 | rimm(off))
425
426 #define LDRBa(dst, base, off) (AL | (0b011<<25) | (0b1110<<21) | (1<<20) | base<<16 | dst<<12 | off)
427 #define LDRSBai(dst, base, off) (AL | (0b000<<25) | (0b0110<<21) | (1<<20) | base<<16 | dst<<12 | ((off&0xF0)<<4)|0b1101<<4|(off&0x0F))
428 #define STRBa(dst, base, off) (AL | (0b011<<25) | (0b1110<<21) | (0<<20) | base<<16 | dst<<12 | off)
429
430 #define LDRHa(dst, base, off) (AL | (0b000<<25) | (0b1100<<21) | (1<<20) | base<<16 | dst<<12 | (0b1011<<4) | off)
431 #define LDRSHai(dst, base, off) (AL | (0b000<<25) | (0b1110<<21) | (1<<20) | base<<16 | dst<<12 | ((off&0xF0)<<4)|0b1111<<4|(off&0x0F))
432 #define STRHa(dst, base, off) (AL | (0b000<<25) | (0b1100<<21) | (0<<20) | base<<16 | dst<<12 | (0b1011<<4) | off)
433
434 #define STRa(dst, base, off) (AL | (0b011<<25) | (0b1100<<21) | (0<<20) | base<<16 | dst<<12 | off)
435 #define STRx(dst, base, off) (AL | (0b011<<25) | (0b1000<<21) | (0<<20) | base<<16 | dst<<12 | off)
436 #define STRai(dst, base, off) (AL | (0b010<<25) | (0b1100<<21) | (0<<20) | base<<16 | dst<<12 | rimm(off))
437 #define STRxi(dst, base, off) (AL | (0b010<<25) | (0b1000<<21) | (0<<20) | base<<16 | dst<<12 | rimm(off))
438 #define STRaiw(dst, base, off) (AL | (0b010<<25) | (0b1101<<21) | (0<<20) | base<<16 | dst<<12 | rimm(off))
439 #define STRxiw(dst, base, off) (AL | (0b010<<25) | (0b1001<<21) | (0<<20) | base<<16 | dst<<12 | rimm(off))
440
441 // load with post-increment
442 #define POP1(reg) (AL | (0b010<<25) | (0b0100<<21) | (1<<20) | SP<<16 | reg<<12 | reg)
443 // store with post-increment
444 #define PUSH1(reg) (AL | (0b010<<25) | (0b1001<<21) | (0<<20) | SP<<16 | reg<<12 | 4)
445
446 // branch to target address (for small jumps)
447 #define Bi(i) \
448 (AL | (0b10)<<26 | (1<<25) /*I*/ | (0<<24) /*L*/ | (i))
449 // call subroutine
450 #define BLi(i) \
451 (AL | (0b10)<<26 | (1<<25) /*I*/ | (1<<24) /*L*/ | (i))
452 // branch and exchange (register)
453 #define BX(reg) \
454 (AL | 0b00010010<<20 | 0b1111<<16 | 0b1111<<12 | 0b1111<<8| 0b0001<<4 | reg)
455 // call subroutine (register)
456 #define BLX(reg) \
457 (AL | 0b00010010<<20 | 0b1111<<16 | 0b1111<<12 | 0b1111<<8| 0b0011<<4 | reg)
458
459 #define PUSH(mask) (AL | (0b100100<<22) | (0b10<<20) | (0b1101<<16) | mask)
460 #define PUSH2(r1, r2) (AL | (0b100100<<22) | (0b10<<20) | (0b1101<<16) | 1<<r1 | 1<<r2)
461 //#define PUSH1(reg) STRxiw(SP, reg, 4)
462
463 #define POP(mask) (0xe8bd0000|mask)
464
465 #define STM(base, regs) \
466 (AL | 0b100<<25 | 0<<24/*P*/| 0<<24/*U*/| 0<<24/*S*/| 0<<24/*W*/ | (base<<16) | (regs&~(1<<16)))
467
468 // note: op1 and op2 must not be the same
469 #define MUL(op1, op2, op3) \
470 (AL | 0b0000000<<21 | (1<<20) /*S*/ | (op1<<16) | (op3<<8) | 0b1001<<4 | (op2))
471
472 // puts integer in R0
473 #define emit_MOVR0i(arg) emit_MOVRxi(R0, arg)
474
475 // puts integer arg in register reg
476 #define emit_MOVRxi(reg, arg) do { \
477 emit(MOVW(reg, (arg&0xFFFF))); \
478 if (arg > 0xFFFF) \
479 emit(MOVT(reg, (((arg>>16)&0xFFFF)))); \
480 } while(0)
481
482 // puts integer arg in register reg. adds nop if only one instr is needed to
483 // make size constant
484 #define emit_MOVRxi_or_NOP(reg, arg) do { \
485 emit(MOVW(reg, (arg&0xFFFF))); \
486 if (arg > 0xFFFF) \
487 emit(MOVT(reg, (((arg>>16)&0xFFFF)))); \
488 else \
489 emit(NOP); \
490 } while(0)
491
492 // arm core register -> singe precision register
493 #define VMOVass(Vn, Rt) (AL|(0b1110<<24)|(0b000<<21)|(0<<20)| ((Vn>>1)<<16) | (Rt<<12) | (0b1010<<8) | ((Vn&1)<<7) | (1<<4))
494 // singe precision register -> arm core register
495 #define VMOVssa(Rt, Vn) (AL|(0b1110<<24)|(0b000<<21)|(1<<20)| ((Vn>>1)<<16) | (Rt<<12) | (0b1010<<8) | ((Vn&1)<<7) | (1<<4))
496
497 #define _VCVT_F(Vd, Vm, opc2, op) \
498 (AL|(0b11101<<23)|((Vd&1)<<22)|(0b111<<19)|(opc2<<16)|((Vd>>1)<<12)|(0b101<<9)|(0<<8)|(op<<7)|(1<<6)|((Vm&1)<<5)|(Vm>>1))
499 #define VCVT_F32_U32(Sd, Sm) _VCVT_F(Sd, Sm, 0b000, 0 /* unsigned */)
500 #define VCVT_U32_F32(Sd, Sm) _VCVT_F(Sd, Sm, 0b100, 1 /* round zero */)
501 #define VCVT_F32_S32(Sd, Sm) _VCVT_F(Sd, Sm, 0b000, 1 /* unsigned */)
502 #define VCVT_S32_F32(Sd, Sm) _VCVT_F(Sd, Sm, 0b101, 1 /* round zero */)
503
504 #define VLDRa(Vd, Rn, i) (AL|(0b1101<<24)|1<<23|((Vd&1)<<22)|1<<20|(Rn<<16)|((Vd>>1)<<12)|(0b1010<<8)|off8(i))
505 #define VSTRa(Vd, Rn, i) (AL|(0b1101<<24)|1<<23|((Vd&1)<<22)|0<<20|(Rn<<16)|((Vd>>1)<<12)|(0b1010<<8)|off8(i))
506
507 #define VNEG_F32(Vd, Vm) \
508 (AL|(0b11101<<23)|((Vd&1)<<22)|(0b11<<20)|(1<<16)|((Vd>>1)<<12)|(0b101<<9)|(0<<8)|(1<<6)|((Vm&1)<<5)|(Vm>>1))
509
510 #define VADD_F32(Vd, Vn, Vm) \
511 (AL|(0b11100<<23)|((Vd&1)<<22)|(0b11<<20)|((Vn>>1)<<16)|((Vd>>1)<<12)|(0b101<<9)|(0<<8)|((Vn&1)<<7)|(0<<6)|((Vm&1)<<5)|(Vm>>1))
512 #define VSUB_F32(Vd, Vn, Vm) \
513 (AL|(0b11100<<23)|((Vd&1)<<22)|(0b11<<20)|((Vn>>1)<<16)|((Vd>>1)<<12)|(0b101<<9)|(0<<8)|((Vn&1)<<7)|(1<<6)|((Vm&1)<<5)|(Vm>>1))
514 #define VMUL_F32(Vd, Vn, Vm) \
515 (AL|(0b11100<<23)|((Vd&1)<<22)|(0b10<<20)|((Vn>>1)<<16)|((Vd>>1)<<12)|(0b101)<<9|(0<<8)|((Vn&1)<<7)|(0<<6)|((Vm&1)<<5)|(Vm>>1))
516 #define VDIV_F32(Vd, Vn, Vm) \
517 (AL|(0b11101<<23)|((Vd&1)<<22)|(0b00<<20)|((Vn>>1)<<16)|((Vd>>1)<<12)|(0b101<<9)|(0<<8)|((Vn&1)<<7)|(0<<6)|((Vm&1)<<5)|(Vm>>1))
518
519 #define _VCMP_F32(Vd, Vm, E) \
520 (AL|(0b11101<<23)|((Vd&1)<<22)|(0b11<<20)|((0b0100)<<16)|((Vd>>1)<<12)|(0b101<<9)|(0<<8)|(E<<7)|(1<<6)|((Vm&1)<<5)|(Vm>>1))
521 #define VCMP_F32(Vd, Vm) _VCMP_F32(Vd, Vm, 0)
522
523 #define VMRS(Rt) \
524 (AL|(0b11101111<<20)|(0b0001<<16)|(Rt<<12)|(0b1010<<8)|(1<<4))
525
526 // check if instruction in R0 is within range. Clobbers R1, R12
527 #define CHECK_JUMP do { \
528 static int bytes_to_skip = -1; \
529 static unsigned branch = -1; \
530 emit_MOVRxi(R1, (unsigned)vm->instructionCount); \
531 emit(CMP(R0, R1)); \
532 if (branch == -1) \
533 branch = vm->codeLength; \
534 emit(cond(LT, Bi(j_rel(bytes_to_skip)))); \
535 emit_MOVRxi_or_NOP(R12, (unsigned)ErrJump); \
536 emit(BLX(R12)); \
537 if (bytes_to_skip == -1) \
538 bytes_to_skip = vm->codeLength - branch; \
539 } while(0)
540
541 //#define CONST_OPTIMIZE
542 #ifdef CONST_OPTIMIZE
543 #define MAYBE_EMIT_CONST() \
544 if (got_const) \
545 { \
546 got_const = 0; \
547 vm->instructionPointers[instruction-1] = assembler_get_code_size(); \
548 STACK_PUSH(4); \
549 emit("movl $%d, (%%r9, %%rbx, 4)", const_value); \
550 }
551 #else
552 #define MAYBE_EMIT_CONST()
553 #endif
554
555 // optimize: use load multiple
556 #define IJ(comparator) do { \
557 MAYBE_EMIT_CONST(); \
558 emit_MOVRxi(R0, arg.i); \
559 CHECK_JUMP; \
560 emit(LDRTxi(R0, rOPSTACK, 4)); \
561 emit(LDRTxi(R1, rOPSTACK, 4)); \
562 emit(CMP(R1, R0)); \
563 emit(cond(comparator, Bi(j_rel(vm->instructionPointers[arg.i]-vm->codeLength)))); \
564 } while (0)
565
566 #define FJ(comparator) do { \
567 emit_MOVRxi(R0, arg.i); \
568 CHECK_JUMP; \
569 emit(SUBi(rOPSTACK, rOPSTACK, 8)); \
570 emit(VLDRa(S15, rOPSTACK, 4)); \
571 emit(VLDRa(S14, rOPSTACK, 8)); \
572 emit(VCMP_F32(S15, S14)); \
573 emit(VMRS(APSR_nzcv)); \
574 emit(cond(comparator, Bi(j_rel(vm->instructionPointers[arg.i]-vm->codeLength)))); \
575 } while (0)
576
577 #define printreg(reg) emit(PUSH1(R3)); emit(BLX(reg)); emit(POP1(R3));
578
579 static inline unsigned _j_rel(int x, int pc)
580 {
581 if (x&3) goto err;
582 x = (x>>2)-2;
583 if (x < 0)
584 {
585 if ((x&(0xFF<<24)) != 0xFF<<24)
586 goto err;
587 x &= ~(0xFF<<24);
588 }
589 else if (x&(0xFF<<24))
590 goto err;
591 return x;
592 err:
593 DIE("jump %d out of range at %d", x, pc);
594 }
595
596 void VM_Compile(vm_t *vm, vmHeader_t *header)
597 {
598 unsigned char *code;
599 int i_count, pc = 0;
600 int pass;
601 int codeoffsets[1024];
602
603 #define j_rel(x) (pass?_j_rel(x, pc):0xBAD)
604 #define OFFSET(i) (pass?(j_rel(codeoffsets[i]-vm->codeLength)):(0xF000000F))
605 #define new_offset() (offsidx++)
606 #define get_offset(i) (codeoffsets[i])
607 #define save_offset(i) (codeoffsets[i] = vm->codeLength)
608 #define OFF_CODE 0
609 #define OFF_IMMEDIATES 1
610
611 vm->compiled = qfalse;
612
613 vm->codeBase = NULL;
614 vm->codeLength = 0;
615
616 for (pass = 0; pass < 2; ++pass) {
617
618 int offsidx = 0;
619
620 // const optimization
621 unsigned got_const = 0, const_value = 0;
622
623 if(pass)
624 {
625 vm->codeBase = mmap(NULL, vm->codeLength, PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0);
626 if(vm->codeBase == MAP_FAILED)
627 Com_Error(ERR_FATAL, "VM_CompileARM: can't mmap memory");
628 vm->codeLength = 0;
629 }
630
631 //int (*entry)(vm_t*, int*, int*);
632 emit(PUSH((((1<<8)-1)<<4)|(1<<14))); // push R4-R11, LR
633 emit(SUBi(SP, SP, 12)); // align stack!
634 emit(LDRai(rCODEBASE, R0, offsetof(vm_t, codeBase)));
635 emit(LDRai(rDATABASE, R0, offsetof(vm_t, dataBase)));
636 emit(LDRai(rDATAMASK, R0, offsetof(vm_t, dataMask)));
637 emit(LDRai(rPSTACK, R1, 0));
638 emit(MOV(rOPSTACK, R2)); // TODO: reverse opstack to avoid writing to return address
639 emit(MOV(rOPSTACKBASE, rOPSTACK));
640
641 emit(BLi(OFFSET(OFF_CODE)));
642
643 // save return value in r0
644 emit(LDRTxi(R0, rOPSTACK, 4)); // r0 = *opstack; rOPSTACK -= 4
645
646 emit(ADDi(SP, SP, 12)); // align stack!
647 emit(POP((((1<<8)-1)<<4)|(1<<15))); // pop R4-R11, LR -> PC
648
649 /* save some immediates here */
650 emit(BKPT(0));
651 emit(BKPT(0));
652 save_offset(OFF_IMMEDIATES);
653 // emit((unsigned)whatever);
654 emit(BKPT(0));
655 emit(BKPT(0));
656
657 save_offset(OFF_CODE);
658 offsidx = OFF_IMMEDIATES+1;
659
660 code = (unsigned char *) header + header->codeOffset;
661 pc = 0;
662
663 for (i_count = 0; i_count < header->instructionCount; i_count++) {
664 union {
665 unsigned char b[4];
666 unsigned int i;
667 } arg;
668 unsigned char op = code[pc++];
669
670 vm->instructionPointers[i_count] = vm->codeLength;
671
672 if (vm_opInfo[op] & opImm4)
673 {
674 memcpy(arg.b, &code[pc], 4);
675 pc += 4;
676 #ifdef EXCESSIVE_DEBUG
677 Com_Printf("%d: instruction %d (%s %d), offset %d\n", pass, i_count, opnames[op], arg.i, vm->codeLength);
678 #endif
679 }
680 else if (vm_opInfo[op] & opImm1)
681 {
682 arg.b[0] = code[pc];
683 ++pc;
684 #ifdef EXCESSIVE_DEBUG
685 Com_Printf("%d: instruction %d (%s %hhd), offset %d\n", pass, i_count, opnames[op], arg.i, vm->codeLength);
686 #endif
687 }
688 else
689 {
690 #ifdef EXCESSIVE_DEBUG
691 Com_Printf("%d: instruction %d (%s), offset %d\n", pass, i_count, opnames[op], vm->codeLength);
692 #endif
693 }
694
695 // TODO: for debug only
696 //emit_MOVRxi(R4, i_count);
697
698 switch ( op )
699 {
700 case OP_UNDEF:
701 break;
702
703 case OP_IGNORE:
704 NOTIMPL(op);
705 break;
706
707 case OP_BREAK:
708 emit(BKPT(0));
709 break;
710
711 case OP_ENTER:
712 MAYBE_EMIT_CONST();
713 emit(PUSH1(LR));
714 emit(SUBi(SP, SP, 12)); // align stack
715 if (arg.i == 0 || can_encode(arg.i))
716 {
717 emit(SUBi(rPSTACK, rPSTACK, arg.i)); // pstack -= arg
718 }
719 else
720 {
721 emit_MOVR0i(arg.i);
722 emit(SUB(rPSTACK, rPSTACK, R0)); // pstack -= arg
723 }
724 break;
725
726 case OP_LEAVE:
727 if (arg.i == 0 || can_encode(arg.i))
728 {
729 emit(ADDi(rPSTACK, rPSTACK, arg.i)); // pstack += arg
730 }
731 else
732 {
733 emit_MOVR0i(arg.i);
734 emit(ADD(rPSTACK, rPSTACK, R0)); // pstack += arg
735 }
736 emit(ADDi(SP, SP, 12));
737 emit(0xe49df004); // pop pc
738 break;
739
740 case OP_CALL:
741 #if 0
742 // save next instruction
743 emit_MOVR0i(i_count);
744 emit(STRa(R0, rDATABASE, rPSTACK)); // dataBase[pstack] = r0
745 #endif
746 if (got_const) {
747 NOTIMPL(op);
748 } else {
749 static int bytes_to_skip = -1;
750 static unsigned start_block = -1;
751 MAYBE_EMIT_CONST();
752 // get instruction nr from stack
753 emit(LDRTxi(R0, rOPSTACK, 4)); // r0 = *opstack; rOPSTACK -= 4
754 emit(CMPi(R0, 0)); // check if syscall
755 if (start_block == -1)
756 start_block = vm->codeLength;
757 emit(cond(LT, Bi(j_rel(bytes_to_skip))));
758 CHECK_JUMP;
759 emit_MOVRxi_or_NOP(R1, (unsigned)vm->instructionPointers);
760 emit(LDRa(R0, R1, rLSL(2, R0))); // r0 = ((int*)r1)[r0]
761 emit(ADD(R0, rCODEBASE, R0)); // r0 = codeBase+r0
762 emit(BLX(R0));
763 emit(Bi(j_rel(vm->instructionPointers[i_count+1]-vm->codeLength)));
764 if (bytes_to_skip == -1)
765 bytes_to_skip = vm->codeLength - start_block;
766 emit(MOV(R1, rPSTACK));
767 emit_MOVRxi(R12, (unsigned)asmcall);
768 emit(BLX(R12));
769 // store return value
770 emit(STRaiw(R0, rOPSTACK, 4)); // opstack+=4; *opstack = r0
771 }
772 break;
773
774 case OP_PUSH:
775 MAYBE_EMIT_CONST();
776 emit(ADDi(rOPSTACK, rOPSTACK, 4));
777 break;
778
779 case OP_POP:
780 MAYBE_EMIT_CONST();
781 emit(SUBi(rOPSTACK, rOPSTACK, 4));
782 break;
783
784 case OP_CONST:
785 MAYBE_EMIT_CONST();
786 emit_MOVR0i(arg.i);
787 emit(STRaiw(R0, rOPSTACK, 4)); // opstack+=4; *opstack = r0
788 break;
789
790 case OP_LOCAL:
791 MAYBE_EMIT_CONST();
792 if (arg.i == 0 || can_encode(arg.i))
793 {
794 emit(ADDi(R0, rPSTACK, arg.i)); // r0 = pstack+arg
795 }
796 else
797 {
798 emit_MOVR0i(arg.i);
799 emit(ADD(R0, rPSTACK, R0)); // r0 = pstack+arg
800 }
801 emit(STRaiw(R0, rOPSTACK, 4)); // opstack+=4; *opstack = r0
802 break;
803
804 case OP_JUMP:
805 if(got_const) {
806 NOTIMPL(op);
807 } else {
808 emit(LDRTxi(R0, rOPSTACK, 4)); // r0 = *opstack; rOPSTACK -= 4
809 CHECK_JUMP;
810 emit_MOVRxi(R1, (unsigned)vm->instructionPointers);
811 emit(LDRa(R0, R1, rLSL(2, R0))); // r0 = ((int*)r1)[r0]
812 emit(ADD(R0, rCODEBASE, R0)); // r0 = codeBase+r0
813 emit(BLX(R0));
814 }
815 break;
816
817 case OP_EQ:
818 IJ(EQ);
819 break;
820
821 case OP_NE:
822 IJ(NE);
823 break;
824
825 case OP_LTI:
826 IJ(LT);
827 break;
828
829 case OP_LEI:
830 IJ(LE);
831 break;
832
833 case OP_GTI:
834 IJ(GT);
835 break;
836
837 case OP_GEI:
838 IJ(GE);
839 break;
840
841 case OP_LTU:
842 IJ(LO);
843 break;
844
845 case OP_LEU:
846 IJ(LS);
847 break;
848
849 case OP_GTU:
850 IJ(HI);
851 break;
852
853 case OP_GEU:
854 IJ(HS);
855 break;
856
857 case OP_EQF:
858 FJ(EQ);
859 break;
860
861 case OP_NEF:
862 FJ(NE);
863 break;
864
865 case OP_LTF:
866 FJ(LT);
867 break;
868
869 case OP_LEF:
870 FJ(LE);
871 break;
872
873 case OP_GTF:
874 FJ(GT);
875 break;
876
877 case OP_GEF:
878 FJ(GE);
879 break;
880
881 case OP_LOAD1:
882 MAYBE_EMIT_CONST();
883 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
884 emit(AND(R0, rDATAMASK, R0)); // r0 = r0 & rDATAMASK
885 emit(LDRBa(R0, rDATABASE, R0)); // r0 = (unsigned char)dataBase[r0]
886 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
887 break;
888
889 case OP_LOAD2:
890 MAYBE_EMIT_CONST();
891 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
892 emit(AND(R0, rDATAMASK, R0)); // r0 = r0 & rDATAMASK
893 emit(LDRHa(R0, rDATABASE, R0)); // r0 = (unsigned short)dataBase[r0]
894 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
895 break;
896
897 case OP_LOAD4:
898 MAYBE_EMIT_CONST();
899 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
900 emit(AND(R0, rDATAMASK, R0)); // r0 = r0 & rDATAMASK
901 emit(LDRa(R0, rDATABASE, R0)); // r0 = dataBase[r0]
902 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
903 break;
904
905 case OP_STORE1:
906 MAYBE_EMIT_CONST();
907 emit(LDRTxi(R0, rOPSTACK, 4)); // r0 = *opstack; rOPSTACK -= 4
908 emit(LDRTxi(R1, rOPSTACK, 4)); // r1 = *opstack; rOPSTACK -= 4
909 emit(AND(R1, rDATAMASK, R1)); // r1 = r1 & rDATAMASK
910 emit(STRBa(R0, rDATABASE, R1)); // database[r1] = r0
911 break;
912
913 case OP_STORE2:
914 MAYBE_EMIT_CONST();
915 emit(LDRTxi(R0, rOPSTACK, 4)); // r0 = *opstack; rOPSTACK -= 4
916 emit(LDRTxi(R1, rOPSTACK, 4)); // r1 = *opstack; rOPSTACK -= 4
917 emit(AND(R1, rDATAMASK, R1)); // r1 = r1 & rDATAMASK
918 emit(STRHa(R0, rDATABASE, R1)); // database[r1] = r0
919 break;
920
921 case OP_STORE4:
922 MAYBE_EMIT_CONST();
923 // optimize: use load multiple
924 // value
925 emit(LDRTxi(R0, rOPSTACK, 4)); // r0 = *opstack; rOPSTACK -= 4
926 // pointer
927 emit(LDRTxi(R1, rOPSTACK, 4)); // r1 = *opstack; rOPSTACK -= 4
928 emit(AND(R1, rDATAMASK, R1)); // r1 = r1 & rDATAMASK
929 // store value at pointer
930 emit(STRa(R0, rDATABASE, R1)); // database[r1] = r0
931 break;
932
933 case OP_ARG:
934 MAYBE_EMIT_CONST();
935 emit(LDRTxi(R0, rOPSTACK, 4)); // r0 = *opstack; rOPSTACK -= 4
936 emit(ADDi(R1, rPSTACK, arg.b[0])); // r1 = programStack+arg
937 emit(AND(R1, rDATAMASK, R1)); // r1 = r1 & rDATAMASK
938 emit(STRa(R0, rDATABASE, R1)); // dataBase[r1] = r0
939 break;
940
941 case OP_BLOCK_COPY:
942 MAYBE_EMIT_CONST();
943 emit(LDRTxi(R1, rOPSTACK, 4)); // r0 = *opstack; rOPSTACK -= 4
944 emit(LDRTxi(R0, rOPSTACK, 4));
945 emit_MOVRxi(R2, arg.i);
946 emit_MOVRxi(R12, (unsigned)VM_BlockCopy);
947 emit(BLX(R12));
948 break;
949
950 case OP_SEX8:
951 MAYBE_EMIT_CONST();
952 emit(LDRSBai(R0, rOPSTACK, 0)); // sign extend *opstack
953 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
954 break;
955
956 case OP_SEX16:
957 MAYBE_EMIT_CONST();
958 emit(LDRSHai(R0, rOPSTACK, 0)); // sign extend *opstack
959 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
960 break;
961
962 case OP_NEGI:
963 MAYBE_EMIT_CONST();
964 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
965 emit(RSBi(R0, R0, 0)); // r0 = -r0
966 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
967 break;
968
969 case OP_ADD:
970 MAYBE_EMIT_CONST();
971 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
972 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
973 emit(ADD(R0, R1, R0)); // r0 = r1 + r0
974 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
975 break;
976
977 case OP_SUB:
978 MAYBE_EMIT_CONST();
979 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
980 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
981 emit(SUB(R0, R1, R0)); // r0 = r1 - r0
982 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
983 break;
984
985 case OP_DIVI:
986 case OP_DIVU:
987 MAYBE_EMIT_CONST();
988 emit(LDRai(R1, rOPSTACK, 0)); // r1 = *opstack
989 emit(LDRxiw(R0, rOPSTACK, 4)); // opstack-=4; r0 = *opstack
990 if ( op == OP_DIVI )
991 emit_MOVRxi(R12, (unsigned)__aeabi_idiv);
992 else
993 emit_MOVRxi(R12, (unsigned)__aeabi_uidiv);
994 emit(BLX(R12));
995 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
996 break;
997
998 case OP_MODI:
999 case OP_MODU:
1000 MAYBE_EMIT_CONST();
1001 emit(LDRai(R1, rOPSTACK, 0)); // r1 = *opstack
1002 emit(LDRxiw(R0, rOPSTACK, 4)); // opstack-=4; r0 = *opstack
1003 if ( op == OP_MODI )
1004 emit_MOVRxi(R12, (unsigned)__aeabi_idivmod);
1005 else
1006 emit_MOVRxi(R12, (unsigned)__aeabi_uidivmod);
1007 emit(BLX(R12));
1008 emit(STRai(R1, rOPSTACK, 0)); // *opstack = r1
1009 break;
1010
1011 case OP_MULI:
1012 case OP_MULU:
1013 MAYBE_EMIT_CONST();
1014 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1015 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1016 emit(MUL(R0, R1, R0)); // r0 = r1 * r0
1017 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1018 break;
1019
1020 case OP_BAND:
1021 MAYBE_EMIT_CONST();
1022 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1023 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1024 emit(AND(R0, R1, R0)); // r0 = r1 & r0
1025 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1026 break;
1027
1028 case OP_BOR:
1029 MAYBE_EMIT_CONST();
1030 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1031 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1032 emit(ORR(R0, R1, R0)); // r0 = r1 | r0
1033 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1034 break;
1035
1036 case OP_BXOR:
1037 MAYBE_EMIT_CONST();
1038 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1039 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1040 emit(EOR(R0, R1, R0)); // r0 = r1 ^ r0
1041 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1042 break;
1043
1044 case OP_BCOM:
1045 MAYBE_EMIT_CONST();
1046 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1047 emit(MVN(R0, R0)); // r0 = ~r0
1048 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1049 break;
1050
1051 case OP_LSH:
1052 MAYBE_EMIT_CONST();
1053 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1054 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1055 emit(LSL(R0, R1, R0)); // r0 = r1 << r0
1056 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1057 break;
1058
1059 case OP_RSHI:
1060 MAYBE_EMIT_CONST();
1061 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1062 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1063 emit(ASR(R0, R1, R0)); // r0 = r1 >> r0
1064 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1065 break;
1066
1067 case OP_RSHU:
1068 MAYBE_EMIT_CONST();
1069 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1070 emit(LDRxiw(R1, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1071 emit(LSR(R0, R1, R0)); // r0 = (unsigned)r1 >> r0
1072 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1073 break;
1074
1075 case OP_NEGF:
1076 MAYBE_EMIT_CONST();
1077 emit(VLDRa(S14, rOPSTACK, 0)); // s14 = *((float*)opstack)
1078 emit(VNEG_F32(S14, S14)); // s15 = -s14
1079 emit(VSTRa(S14, rOPSTACK, 0)); // *((float*)opstack) = s15
1080 break;
1081
1082 case OP_ADDF:
1083 MAYBE_EMIT_CONST();
1084 emit(VLDRa(S14, rOPSTACK, 0)); // s14 = *((float*)opstack)
1085 // vldr can't modify rOPSTACK so
1086 // we'd either need to change it
1087 // with sub or use regular ldr+vmov
1088 emit(LDRxiw(R0, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1089 emit(VMOVass(S15,R0)); // s15 = r0
1090 emit(VADD_F32(S14, S15, S14)); // s14 = s14 + s15
1091 emit(VSTRa(S14, rOPSTACK, 0)); // *((float*)opstack) = s15
1092 break;
1093
1094 case OP_SUBF:
1095 emit(VLDRa(S14, rOPSTACK, 0)); // s14 = *((float*)opstack)
1096 // see OP_ADDF
1097 emit(LDRxiw(R0, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1098 emit(VMOVass(S15,R0)); // s15 = r0
1099 emit(VSUB_F32(S14, S15, S14)); // s14 = s14 - s15
1100 emit(VSTRa(S14, rOPSTACK, 0)); // *((float*)opstack) = s15
1101 break;
1102
1103 case OP_DIVF:
1104 emit(VLDRa(S14, rOPSTACK, 0)); // s14 = *((float*)opstack)
1105 // see OP_ADDF
1106 emit(LDRxiw(R0, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1107 emit(VMOVass(S15,R0)); // s15 = r0
1108 emit(VDIV_F32(S14, S15, S14)); // s14 = s14 / s15
1109 emit(VSTRa(S14, rOPSTACK, 0)); // *((float*)opstack) = s15
1110 break;
1111
1112 case OP_MULF:
1113 emit(VLDRa(S14, rOPSTACK, 0)); // s14 = *((float*)opstack)
1114 // see OP_ADDF
1115 emit(LDRxiw(R0, rOPSTACK, 4)); // opstack-=4; r1 = *opstack
1116 emit(VMOVass(S15,R0)); // s15 = r0
1117 emit(VMUL_F32(S14, S15, S14)); // s14 = s14 * s15
1118 emit(VSTRa(S14, rOPSTACK, 0)); // *((float*)opstack) = s15
1119 break;
1120
1121 case OP_CVIF:
1122 MAYBE_EMIT_CONST();
1123 emit(LDRai(R0, rOPSTACK, 0)); // r0 = *opstack
1124 emit(VMOVass(S14,R0)); // s14 = r0
1125 emit(VCVT_F32_S32(S14, S14)); // s15 = (float)s14
1126 emit(VSTRa(S14, rOPSTACK, 0)); // *((float*)opstack) = s15
1127 break;
1128
1129 case OP_CVFI:
1130 MAYBE_EMIT_CONST();
1131 emit(VLDRa(S14, rOPSTACK, 0)); // s14 = *((float*)opstack)
1132 emit(VCVT_S32_F32(S14, S14)); // s15 = (int)s14
1133 emit(VMOVssa(R0,S14)); // s14 = r0
1134 emit(STRai(R0, rOPSTACK, 0)); // *opstack = r0
1135 break;
1136 }
1137 }
1138
1139 // never reached
1140 emit(BKPT(0));
1141 } // pass
1142
1143 if (mprotect(vm->codeBase, vm->codeLength, PROT_READ|PROT_EXEC/* |PROT_WRITE */)) {
1144 VM_Destroy_Compiled(vm);
1145 DIE("mprotect failed");
1146 }
1147
1148 // clear icache, http://blogs.arm.com/software-enablement/141-caches-and-self-modifying-code/
1149 __clear_cache(vm->codeBase, vm->codeBase+vm->codeLength);
1150
1151 vm->destroy = VM_Destroy_Compiled;
1152 vm->compiled = qtrue;
1153 }
1154
1155 int VM_CallCompiled(vm_t *vm, int *args)
1156 {
1157 byte stack[OPSTACK_SIZE + 15];
1158 int *opStack;
1159 int programStack = vm->programStack;
1160 int stackOnEntry = programStack;
1161 byte *image = vm->dataBase;
1162 int *argPointer;
1163 int retVal;
1164
1165 currentVM = vm;
1166
1167 vm->currentlyInterpreting = qtrue;
1168
1169 programStack -= ( 8 + 4 * MAX_VMMAIN_ARGS );
1170 argPointer = (int *)&image[ programStack + 8 ];
1171 memcpy( argPointer, args, 4 * MAX_VMMAIN_ARGS );
1172 argPointer[-1] = 0;
1173 argPointer[-2] = -1;
1174
1175
1176 opStack = PADP(stack, 16);
1177 *opStack = 0xDEADBEEF;
1178
1179 #if 0
1180 Com_Printf("r5 opStack:\t\t%p\n", opStack);
1181 Com_Printf("r7 codeBase:\t\t%p\n", vm->codeBase);
1182 Com_Printf("r8 programStack:\t0x%x\n", programStack);
1183 Com_Printf("r9 dataBase:\t\t%p\n", vm->dataBase);
1184 #endif
1185
1186 /* call generated code */
1187 {
1188 //int (*entry)(void *, int, void *, int);
1189 int (*entry)(vm_t*, int*, int*);
1190
1191 entry = (void *)(vm->codeBase);
1192 //__asm__ volatile("bkpt");
1193 //retVal = entry(vm->codeBase, programStack, vm->dataBase, vm->dataMask);
1194 retVal = entry(vm, &programStack, opStack);
1195 }
1196
1197 if(*opStack != 0xDEADBEEF)
1198 {
1199 Com_Error(ERR_DROP, "opStack corrupted in compiled code");
1200 }
1201
1202 if(programStack != stackOnEntry - (8 + 4 * MAX_VMMAIN_ARGS))
1203 Com_Error(ERR_DROP, "programStack corrupted in compiled code");
1204
1205 vm->programStack = stackOnEntry;
1206 vm->currentlyInterpreting = qfalse;
1207
1208 return retVal;
1209 }
925925
926926 }
927927
928
929 #define MAC_EVENT_PUMP_MSEC 5
930928
931929 /*
932930 ==================
127127
128128 /*
129129 ============
130 R_GetCommandBuffer
130 R_GetCommandBufferReserved
131131
132132 make sure there is enough command space
133133 ============
134134 */
135 void *R_GetCommandBuffer( int bytes ) {
135 void *R_GetCommandBufferReserved( int bytes, int reservedBytes ) {
136136 renderCommandList_t *cmdList;
137137
138138 cmdList = &backEndData->commands;
139139 bytes = PAD(bytes, sizeof(void *));
140140
141141 // always leave room for the end of list command
142 if ( cmdList->used + bytes + 4 > MAX_RENDER_COMMANDS ) {
143 if ( bytes > MAX_RENDER_COMMANDS - 4 ) {
142 if ( cmdList->used + bytes + sizeof( int ) + reservedBytes > MAX_RENDER_COMMANDS ) {
143 if ( bytes > MAX_RENDER_COMMANDS - sizeof( int ) ) {
144144 ri.Error( ERR_FATAL, "R_GetCommandBuffer: bad size %i", bytes );
145145 }
146146 // if we run out of room, just start dropping commands
150150 cmdList->used += bytes;
151151
152152 return cmdList->cmds + cmdList->used - bytes;
153 }
154
155
156 /*
157 =============
158 R_GetCommandBuffer
159
160 returns NULL if there is not enough space for important commands
161 =============
162 */
163 void *R_GetCommandBuffer( int bytes ) {
164 return R_GetCommandBufferReserved( bytes, PAD( sizeof( swapBuffersCommand_t ), sizeof(void *) ) );
153165 }
154166
155167
630642 if ( !tr.registered ) {
631643 return;
632644 }
633 cmd = R_GetCommandBuffer( sizeof( *cmd ) );
645 cmd = R_GetCommandBufferReserved( sizeof( *cmd ), 0 );
634646 if ( !cmd ) {
635647 return;
636648 }
21222122 // we still need to add it for hyperspace cases
21232123 R_AddDrawSurfCmd( drawSurfs, numDrawSurfs );
21242124 return;
2125 }
2126
2127 // if we overflowed MAX_DRAWSURFS, the drawsurfs
2128 // wrapped around in the buffer and we will be missing
2129 // the first surfaces, not the last ones
2130 if ( numDrawSurfs > MAX_DRAWSURFS ) {
2131 numDrawSurfs = MAX_DRAWSURFS;
21322125 }
21332126
21342127 // sort the drawsurfs by sort type, then orientation, then shader
23702363 */
23712364 void R_RenderView( viewParms_t *parms ) {
23722365 int firstDrawSurf;
2366 int numDrawSurfs;
23732367
23742368 if ( parms->viewportWidth <= 0 || parms->viewportHeight <= 0 ) {
23752369 return;
23922386
23932387 R_GenerateDrawSurfs();
23942388
2395 R_SortDrawSurfs( tr.refdef.drawSurfs + firstDrawSurf, tr.refdef.numDrawSurfs - firstDrawSurf );
2389 // if we overflowed MAX_DRAWSURFS, the drawsurfs
2390 // wrapped around in the buffer and we will be missing
2391 // the first surfaces, not the last ones
2392 numDrawSurfs = tr.refdef.numDrawSurfs;
2393 if ( numDrawSurfs > MAX_DRAWSURFS ) {
2394 numDrawSurfs = MAX_DRAWSURFS;
2395 }
2396
2397 R_SortDrawSurfs( tr.refdef.drawSurfs + firstDrawSurf, numDrawSurfs - firstDrawSurf );
23962398
23972399 // draw main system development information (surface outlines, etc)
23982400 R_FogOff();
109109
110110 if ( bundle->numImageAnimations <= 1 ) {
111111 if ( bundle->isLightmap && ( backEnd.refdef.rdflags & RDF_SNOOPERVIEW ) ) {
112 GL_BindToTMU( tr.whiteImage, 0 );
112 GL_BindToTMU( tr.whiteImage, tmu );
113113 } else {
114114 GL_BindToTMU( bundle->image[0], tmu);
115115 }
127127 index %= bundle->numImageAnimations;
128128
129129 if ( bundle->isLightmap && ( backEnd.refdef.rdflags & RDF_SNOOPERVIEW ) ) {
130 GL_BindToTMU( tr.whiteImage, 0 );
130 GL_BindToTMU( tr.whiteImage, tmu );
131131 } else {
132132 GL_BindToTMU( bundle->image[ index ], tmu );
133133 }
13801380 GLSL_SetUniformFloat(sp, UNIFORM_FOGEYET, eyeT);
13811381 }
13821382
1383 GL_State( pStage->stateBits );
1384
13851383 {
13861384 vec4_t baseColor;
13871385 vec4_t vertColor;
1386 int fadeStart, fadeEnd;
13881387
13891388 ComputeShaderColors(pStage, baseColor, vertColor, pStage->stateBits);
1389
1390 //----(SA) fading model stuff
1391 if ( backEnd.currentEntity )
1392 {
1393 fadeStart = backEnd.currentEntity->e.fadeStartTime;
1394 }
1395 else
1396 {
1397 fadeStart = 0;
1398 }
1399
1400 if ( fadeStart )
1401 {
1402 fadeEnd = backEnd.currentEntity->e.fadeEndTime;
1403
1404 if ( fadeStart > tr.refdef.time )
1405 {
1406 // has not started to fade yet
1407 GL_State( pStage->stateBits );
1408 }
1409 else
1410 {
1411 unsigned int tempState;
1412 float alphaval;
1413
1414 if ( fadeEnd < tr.refdef.time )
1415 {
1416 // entity faded out completely
1417 continue;
1418 }
1419
1420 alphaval = (float)( fadeEnd - tr.refdef.time ) / (float)( fadeEnd - fadeStart );
1421
1422 tempState = pStage->stateBits;
1423 // remove the current blend, and don't write to Z buffer
1424 tempState &= ~( GLS_SRCBLEND_BITS | GLS_DSTBLEND_BITS | GLS_DEPTHMASK_TRUE );
1425 // set the blend to src_alpha, dst_one_minus_src_alpha
1426 tempState |= ( GLS_SRCBLEND_SRC_ALPHA | GLS_DSTBLEND_ONE_MINUS_SRC_ALPHA );
1427 GL_State( tempState );
1428 GL_Cull( CT_FRONT_SIDED );
1429 // modulate the alpha component of each vertex in the render list
1430 baseColor[3] *= alphaval;
1431 vertColor[3] *= alphaval;
1432 }
1433 }
1434 else
1435 {
1436 GL_State( pStage->stateBits );
1437 }
1438 //----(SA) end
13901439
13911440 if ((backEnd.refdef.colorScale != 1.0f) && !(backEnd.refdef.rdflags & RDF_NOWORLDMODEL))
13921441 {
854854
855855 }
856856
857
858 #define MAC_EVENT_PUMP_MSEC 5
859857
860858 /*
861859 ==================
120120
121121 /*
122122 ============
123 R_GetCommandBuffer
123 R_GetCommandBufferReserved
124124
125125 make sure there is enough command space
126126 ============
127127 */
128 void *R_GetCommandBuffer( int bytes ) {
128 void *R_GetCommandBufferReserved( int bytes, int reservedBytes ) {
129129 renderCommandList_t *cmdList;
130130
131131 cmdList = &backEndData->commands;
132132 bytes = PAD(bytes, sizeof(void *));
133133
134134 // always leave room for the end of list command
135 if ( cmdList->used + bytes + 4 > MAX_RENDER_COMMANDS ) {
136 if ( bytes > MAX_RENDER_COMMANDS - 4 ) {
135 if ( cmdList->used + bytes + sizeof( int ) + reservedBytes > MAX_RENDER_COMMANDS ) {
136 if ( bytes > MAX_RENDER_COMMANDS - sizeof( int ) ) {
137137 ri.Error( ERR_FATAL, "R_GetCommandBuffer: bad size %i", bytes );
138138 }
139139 // if we run out of room, just start dropping commands
143143 cmdList->used += bytes;
144144
145145 return cmdList->cmds + cmdList->used - bytes;
146 }
147
148
149 /*
150 =============
151 R_GetCommandBuffer
152
153 returns NULL if there is not enough space for important commands
154 =============
155 */
156 void *R_GetCommandBuffer( int bytes ) {
157 return R_GetCommandBufferReserved( bytes, PAD( sizeof( swapBuffersCommand_t ), sizeof(void *) ) );
146158 }
147159
148160
571583 if ( !tr.registered ) {
572584 return;
573585 }
574 cmd = R_GetCommandBuffer( sizeof( *cmd ) );
586 cmd = R_GetCommandBufferReserved( sizeof( *cmd ), 0 );
575587 if ( !cmd ) {
576588 return;
577589 }
14861486 // we still need to add it for hyperspace cases
14871487 R_AddDrawSurfCmd( drawSurfs, numDrawSurfs );
14881488 return;
1489 }
1490
1491 // if we overflowed MAX_DRAWSURFS, the drawsurfs
1492 // wrapped around in the buffer and we will be missing
1493 // the first surfaces, not the last ones
1494 if ( numDrawSurfs > MAX_DRAWSURFS ) {
1495 numDrawSurfs = MAX_DRAWSURFS;
14961489 }
14971490
14981491 // sort the drawsurfs by sort type, then orientation, then shader
17341727 */
17351728 void R_RenderView( viewParms_t *parms ) {
17361729 int firstDrawSurf;
1730 int numDrawSurfs;
17371731
17381732 if ( parms->viewportWidth <= 0 || parms->viewportHeight <= 0 ) {
17391733 return;
17561750
17571751 R_GenerateDrawSurfs();
17581752
1759 R_SortDrawSurfs( tr.refdef.drawSurfs + firstDrawSurf, tr.refdef.numDrawSurfs - firstDrawSurf );
1753 // if we overflowed MAX_DRAWSURFS, the drawsurfs
1754 // wrapped around in the buffer and we will be missing
1755 // the first surfaces, not the last ones
1756 numDrawSurfs = tr.refdef.numDrawSurfs;
1757 if ( numDrawSurfs > MAX_DRAWSURFS ) {
1758 numDrawSurfs = MAX_DRAWSURFS;
1759 }
1760
1761 R_SortDrawSurfs( tr.refdef.drawSurfs + firstDrawSurf, numDrawSurfs - firstDrawSurf );
17601762
17611763 // draw main system development information (surface outlines, etc)
17621764 R_FogOff();
428428 else
429429 perChannelColorBits = 4;
430430
431 #ifndef USE_OPENGLES
432431 #ifdef __sgi /* Fix for SGIs grabbing too many bits of color */
433432 if (perChannelColorBits == 4)
434433 perChannelColorBits = 0; /* Use minimum size for 16-bit color */
435434
436435 /* Need alpha or else SGIs choose 36+ bit RGB mode */
437 SDL_GL_SetAttribute( SDL_GL_ALPHA_SIZE, 1);
436 SDL_GL_SetAttribute( SDL_GL_ALPHA_SIZE, 1 );
437 #endif
438
439 #ifdef USE_OPENGLES
440 SDL_GL_SetAttribute( SDL_GL_CONTEXT_MAJOR_VERSION, 1 );
438441 #endif
439442
440443 SDL_GL_SetAttribute( SDL_GL_RED_SIZE, perChannelColorBits );
449452 if(r_stereoEnabled->integer)
450453 {
451454 glConfig.stereoEnabled = qtrue;
452 SDL_GL_SetAttribute(SDL_GL_STEREO, 1);
455 SDL_GL_SetAttribute( SDL_GL_STEREO, 1 );
453456 }
454457 else
455458 {
456459 glConfig.stereoEnabled = qfalse;
457 SDL_GL_SetAttribute(SDL_GL_STEREO, 0);
460 SDL_GL_SetAttribute( SDL_GL_STEREO, 0 );
458461 }
459462
460463 SDL_GL_SetAttribute( SDL_GL_DOUBLEBUFFER, 1 );
521524 continue;
522525 }
523526 }
524 #else
525 SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 1);
526
527 if( ( SDL_window = SDL_CreateWindow( CLIENT_WINDOW_TITLE, x, y,
528 glConfig.vidWidth, glConfig.vidHeight, flags ) ) == NULL )
529 {
530 ri.Printf( PRINT_DEVELOPER, "SDL_CreateWindow failed: %s\n", SDL_GetError( ) );
531 continue;
532 }
533 #endif // USE_OPENGLES
534527
535528 SDL_SetWindowIcon( SDL_window, icon );
536529
1616 RASPBERRY_PI=1 \
1717 USE_MUMBLE=0 \
1818 BUILD_GAME_SO=1 \
19 BUILD_GAME_QVM=0 \
2019 BUILD_RENDERER_REND2=0 \
2120 ARCH=arm \
2221 PLATFORM=linux \